1 /* 2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "code/icBuffer.hpp" 27 #include "gc_implementation/g1/bufferingOopClosure.hpp" 28 #include "gc_implementation/g1/concurrentG1Refine.hpp" 29 #include "gc_implementation/g1/concurrentG1RefineThread.hpp" 30 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp" 31 #include "gc_implementation/g1/concurrentZFThread.hpp" 32 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" 33 #include "gc_implementation/g1/g1CollectorPolicy.hpp" 34 #include "gc_implementation/g1/g1MarkSweep.hpp" 35 #include "gc_implementation/g1/g1OopClosures.inline.hpp" 36 #include "gc_implementation/g1/g1RemSet.inline.hpp" 37 #include "gc_implementation/g1/heapRegionRemSet.hpp" 38 #include "gc_implementation/g1/heapRegionSeq.inline.hpp" 39 #include "gc_implementation/g1/vm_operations_g1.hpp" 40 #include "gc_implementation/shared/isGCActiveMark.hpp" 41 #include "memory/gcLocker.inline.hpp" 42 #include "memory/genOopClosures.inline.hpp" 43 #include "memory/generationSpec.hpp" 44 #include "oops/oop.inline.hpp" 45 #include "oops/oop.pcgc.inline.hpp" 46 #include "runtime/aprofiler.hpp" 47 #include "runtime/vmThread.hpp" 48 49 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0; 50 51 // turn it on so that the contents of the young list (scan-only / 52 // to-be-collected) are printed at "strategic" points before / during 53 // / after the collection --- this is useful for debugging 54 #define YOUNG_LIST_VERBOSE 0 55 // CURRENT STATUS 56 // This file is under construction. Search for "FIXME". 57 58 // INVARIANTS/NOTES 59 // 60 // All allocation activity covered by the G1CollectedHeap interface is 61 // serialized by acquiring the HeapLock. This happens in 62 // mem_allocate_work, which all such allocation functions call. 63 // (Note that this does not apply to TLAB allocation, which is not part 64 // of this interface: it is done by clients of this interface.) 65 66 // Local to this file. 
67 68 class RefineCardTableEntryClosure: public CardTableEntryClosure { 69 SuspendibleThreadSet* _sts; 70 G1RemSet* _g1rs; 71 ConcurrentG1Refine* _cg1r; 72 bool _concurrent; 73 public: 74 RefineCardTableEntryClosure(SuspendibleThreadSet* sts, 75 G1RemSet* g1rs, 76 ConcurrentG1Refine* cg1r) : 77 _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true) 78 {} 79 bool do_card_ptr(jbyte* card_ptr, int worker_i) { 80 bool oops_into_cset = _g1rs->concurrentRefineOneCard(card_ptr, worker_i, false); 81 // This path is executed by the concurrent refine or mutator threads, 82 // concurrently, and so we do not care if card_ptr contains references 83 // that point into the collection set. 84 assert(!oops_into_cset, "should be"); 85 86 if (_concurrent && _sts->should_yield()) { 87 // Caller will actually yield. 88 return false; 89 } 90 // Otherwise, we finished successfully; return true. 91 return true; 92 } 93 void set_concurrent(bool b) { _concurrent = b; } 94 }; 95 96 97 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure { 98 int _calls; 99 G1CollectedHeap* _g1h; 100 CardTableModRefBS* _ctbs; 101 int _histo[256]; 102 public: 103 ClearLoggedCardTableEntryClosure() : 104 _calls(0) 105 { 106 _g1h = G1CollectedHeap::heap(); 107 _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); 108 for (int i = 0; i < 256; i++) _histo[i] = 0; 109 } 110 bool do_card_ptr(jbyte* card_ptr, int worker_i) { 111 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { 112 _calls++; 113 unsigned char* ujb = (unsigned char*)card_ptr; 114 int ind = (int)(*ujb); 115 _histo[ind]++; 116 *card_ptr = -1; 117 } 118 return true; 119 } 120 int calls() { return _calls; } 121 void print_histo() { 122 gclog_or_tty->print_cr("Card table value histogram:"); 123 for (int i = 0; i < 256; i++) { 124 if (_histo[i] != 0) { 125 gclog_or_tty->print_cr(" %d: %d", i, _histo[i]); 126 } 127 } 128 } 129 }; 130 131 class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure { 132 int _calls; 133 G1CollectedHeap* _g1h; 134 CardTableModRefBS* _ctbs; 135 public: 136 RedirtyLoggedCardTableEntryClosure() : 137 _calls(0) 138 { 139 _g1h = G1CollectedHeap::heap(); 140 _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); 141 } 142 bool do_card_ptr(jbyte* card_ptr, int worker_i) { 143 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { 144 _calls++; 145 *card_ptr = 0; 146 } 147 return true; 148 } 149 int calls() { return _calls; } 150 }; 151 152 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure { 153 public: 154 bool do_card_ptr(jbyte* card_ptr, int worker_i) { 155 *card_ptr = CardTableModRefBS::dirty_card_val(); 156 return true; 157 } 158 }; 159 160 YoungList::YoungList(G1CollectedHeap* g1h) 161 : _g1h(g1h), _head(NULL), 162 _length(0), 163 _last_sampled_rs_lengths(0), 164 _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) 165 { 166 guarantee( check_list_empty(false), "just making sure..." 
); 167 } 168 169 void YoungList::push_region(HeapRegion *hr) { 170 assert(!hr->is_young(), "should not already be young"); 171 assert(hr->get_next_young_region() == NULL, "cause it should!"); 172 173 hr->set_next_young_region(_head); 174 _head = hr; 175 176 hr->set_young(); 177 double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length); 178 ++_length; 179 } 180 181 void YoungList::add_survivor_region(HeapRegion* hr) { 182 assert(hr->is_survivor(), "should be flagged as survivor region"); 183 assert(hr->get_next_young_region() == NULL, "cause it should!"); 184 185 hr->set_next_young_region(_survivor_head); 186 if (_survivor_head == NULL) { 187 _survivor_tail = hr; 188 } 189 _survivor_head = hr; 190 191 ++_survivor_length; 192 } 193 194 void YoungList::empty_list(HeapRegion* list) { 195 while (list != NULL) { 196 HeapRegion* next = list->get_next_young_region(); 197 list->set_next_young_region(NULL); 198 list->uninstall_surv_rate_group(); 199 list->set_not_young(); 200 list = next; 201 } 202 } 203 204 void YoungList::empty_list() { 205 assert(check_list_well_formed(), "young list should be well formed"); 206 207 empty_list(_head); 208 _head = NULL; 209 _length = 0; 210 211 empty_list(_survivor_head); 212 _survivor_head = NULL; 213 _survivor_tail = NULL; 214 _survivor_length = 0; 215 216 _last_sampled_rs_lengths = 0; 217 218 assert(check_list_empty(false), "just making sure..."); 219 } 220 221 bool YoungList::check_list_well_formed() { 222 bool ret = true; 223 224 size_t length = 0; 225 HeapRegion* curr = _head; 226 HeapRegion* last = NULL; 227 while (curr != NULL) { 228 if (!curr->is_young()) { 229 gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" " 230 "incorrectly tagged (y: %d, surv: %d)", 231 curr->bottom(), curr->end(), 232 curr->is_young(), curr->is_survivor()); 233 ret = false; 234 } 235 ++length; 236 last = curr; 237 curr = curr->get_next_young_region(); 238 } 239 ret = ret && (length == _length); 240 241 if (!ret) { 242 gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!"); 243 gclog_or_tty->print_cr("### list has %d entries, _length is %d", 244 length, _length); 245 } 246 247 return ret; 248 } 249 250 bool YoungList::check_list_empty(bool check_sample) { 251 bool ret = true; 252 253 if (_length != 0) { 254 gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d", 255 _length); 256 ret = false; 257 } 258 if (check_sample && _last_sampled_rs_lengths != 0) { 259 gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths"); 260 ret = false; 261 } 262 if (_head != NULL) { 263 gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head"); 264 ret = false; 265 } 266 if (!ret) { 267 gclog_or_tty->print_cr("### YOUNG LIST does not seem empty"); 268 } 269 270 return ret; 271 } 272 273 void 274 YoungList::rs_length_sampling_init() { 275 _sampled_rs_lengths = 0; 276 _curr = _head; 277 } 278 279 bool 280 YoungList::rs_length_sampling_more() { 281 return _curr != NULL; 282 } 283 284 void 285 YoungList::rs_length_sampling_next() { 286 assert( _curr != NULL, "invariant" ); 287 size_t rs_length = _curr->rem_set()->occupied(); 288 289 _sampled_rs_lengths += rs_length; 290 291 // The current region may not yet have been added to the 292 // incremental collection set (it gets added when it is 293 // retired as the current allocation region). 
294 if (_curr->in_collection_set()) { 295 // Update the collection set policy information for this region 296 _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length); 297 } 298 299 _curr = _curr->get_next_young_region(); 300 if (_curr == NULL) { 301 _last_sampled_rs_lengths = _sampled_rs_lengths; 302 // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths); 303 } 304 } 305 306 void 307 YoungList::reset_auxilary_lists() { 308 guarantee( is_empty(), "young list should be empty" ); 309 assert(check_list_well_formed(), "young list should be well formed"); 310 311 // Add survivor regions to SurvRateGroup. 312 _g1h->g1_policy()->note_start_adding_survivor_regions(); 313 _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */); 314 315 for (HeapRegion* curr = _survivor_head; 316 curr != NULL; 317 curr = curr->get_next_young_region()) { 318 _g1h->g1_policy()->set_region_survivors(curr); 319 320 // The region is a non-empty survivor so let's add it to 321 // the incremental collection set for the next evacuation 322 // pause. 323 _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr); 324 } 325 _g1h->g1_policy()->note_stop_adding_survivor_regions(); 326 327 _head = _survivor_head; 328 _length = _survivor_length; 329 if (_survivor_head != NULL) { 330 assert(_survivor_tail != NULL, "cause it shouldn't be"); 331 assert(_survivor_length > 0, "invariant"); 332 _survivor_tail->set_next_young_region(NULL); 333 } 334 335 // Don't clear the survivor list handles until the start of 336 // the next evacuation pause - we need it in order to re-tag 337 // the survivor regions from this evacuation pause as 'young' 338 // at the start of the next. 339 340 _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */); 341 342 assert(check_list_well_formed(), "young list should be well formed"); 343 } 344 345 void YoungList::print() { 346 HeapRegion* lists[] = {_head, _survivor_head}; 347 const char* names[] = {"YOUNG", "SURVIVOR"}; 348 349 for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) { 350 gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]); 351 HeapRegion *curr = lists[list]; 352 if (curr == NULL) 353 gclog_or_tty->print_cr(" empty"); 354 while (curr != NULL) { 355 gclog_or_tty->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, " 356 "age: %4d, y: %d, surv: %d", 357 curr->bottom(), curr->end(), 358 curr->top(), 359 curr->prev_top_at_mark_start(), 360 curr->next_top_at_mark_start(), 361 curr->top_at_conc_mark_count(), 362 curr->age_in_surv_rate_group_cond(), 363 curr->is_young(), 364 curr->is_survivor()); 365 curr = curr->get_next_young_region(); 366 } 367 } 368 369 gclog_or_tty->print_cr(""); 370 } 371 372 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr) 373 { 374 // Claim the right to put the region on the dirty cards region list 375 // by installing a self pointer. 376 HeapRegion* next = hr->get_next_dirty_cards_region(); 377 if (next == NULL) { 378 HeapRegion* res = (HeapRegion*) 379 Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(), 380 NULL); 381 if (res == NULL) { 382 HeapRegion* head; 383 do { 384 // Put the region to the dirty cards region list. 385 head = _dirty_cards_region_list; 386 next = (HeapRegion*) 387 Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head); 388 if (next == head) { 389 assert(hr->get_next_dirty_cards_region() == hr, 390 "hr->get_next_dirty_cards_region() != hr"); 391 if (next == NULL) { 392 // The last region in the list points to itself. 
393 hr->set_next_dirty_cards_region(hr); 394 } else { 395 hr->set_next_dirty_cards_region(next); 396 } 397 } 398 } while (next != head); 399 } 400 } 401 } 402 403 HeapRegion* G1CollectedHeap::pop_dirty_cards_region() 404 { 405 HeapRegion* head; 406 HeapRegion* hr; 407 do { 408 head = _dirty_cards_region_list; 409 if (head == NULL) { 410 return NULL; 411 } 412 HeapRegion* new_head = head->get_next_dirty_cards_region(); 413 if (head == new_head) { 414 // The last region. 415 new_head = NULL; 416 } 417 hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list, 418 head); 419 } while (hr != head); 420 assert(hr != NULL, "invariant"); 421 hr->set_next_dirty_cards_region(NULL); 422 return hr; 423 } 424 425 void G1CollectedHeap::stop_conc_gc_threads() { 426 _cg1r->stop(); 427 _czft->stop(); 428 _cmThread->stop(); 429 } 430 431 432 void G1CollectedHeap::check_ct_logs_at_safepoint() { 433 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); 434 CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set(); 435 436 // Count the dirty cards at the start. 437 CountNonCleanMemRegionClosure count1(this); 438 ct_bs->mod_card_iterate(&count1); 439 int orig_count = count1.n(); 440 441 // First clear the logged cards. 442 ClearLoggedCardTableEntryClosure clear; 443 dcqs.set_closure(&clear); 444 dcqs.apply_closure_to_all_completed_buffers(); 445 dcqs.iterate_closure_all_threads(false); 446 clear.print_histo(); 447 448 // Now ensure that there's no dirty cards. 449 CountNonCleanMemRegionClosure count2(this); 450 ct_bs->mod_card_iterate(&count2); 451 if (count2.n() != 0) { 452 gclog_or_tty->print_cr("Card table has %d entries; %d originally", 453 count2.n(), orig_count); 454 } 455 guarantee(count2.n() == 0, "Card table should be clean."); 456 457 RedirtyLoggedCardTableEntryClosure redirty; 458 JavaThread::dirty_card_queue_set().set_closure(&redirty); 459 dcqs.apply_closure_to_all_completed_buffers(); 460 dcqs.iterate_closure_all_threads(false); 461 gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.", 462 clear.calls(), orig_count); 463 guarantee(redirty.calls() == clear.calls(), 464 "Or else mechanism is broken."); 465 466 CountNonCleanMemRegionClosure count3(this); 467 ct_bs->mod_card_iterate(&count3); 468 if (count3.n() != orig_count) { 469 gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.", 470 orig_count, count3.n()); 471 guarantee(count3.n() >= orig_count, "Should have restored them all."); 472 } 473 474 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); 475 } 476 477 // Private class members. 478 479 G1CollectedHeap* G1CollectedHeap::_g1h; 480 481 // Private methods. 482 483 // Finds a HeapRegion that can be used to allocate a given size of block. 
484 485 486 HeapRegion* G1CollectedHeap::newAllocRegion_work(size_t word_size, 487 bool do_expand, 488 bool zero_filled) { 489 ConcurrentZFThread::note_region_alloc(); 490 HeapRegion* res = alloc_free_region_from_lists(zero_filled); 491 if (res == NULL && do_expand) { 492 expand(word_size * HeapWordSize); 493 res = alloc_free_region_from_lists(zero_filled); 494 assert(res == NULL || 495 (!res->isHumongous() && 496 (!zero_filled || 497 res->zero_fill_state() == HeapRegion::Allocated)), 498 "Alloc Regions must be zero filled (and non-H)"); 499 } 500 if (res != NULL) { 501 if (res->is_empty()) { 502 _free_regions--; 503 } 504 assert(!res->isHumongous() && 505 (!zero_filled || res->zero_fill_state() == HeapRegion::Allocated), 506 err_msg("Non-young alloc Regions must be zero filled (and non-H):" 507 " res->isHumongous()=%d, zero_filled=%d, res->zero_fill_state()=%d", 508 res->isHumongous(), zero_filled, res->zero_fill_state())); 509 assert(!res->is_on_unclean_list(), 510 "Alloc Regions must not be on the unclean list"); 511 if (G1PrintHeapRegions) { 512 gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], " 513 "top "PTR_FORMAT, 514 res->hrs_index(), res->bottom(), res->end(), res->top()); 515 } 516 } 517 return res; 518 } 519 520 HeapRegion* G1CollectedHeap::newAllocRegionWithExpansion(int purpose, 521 size_t word_size, 522 bool zero_filled) { 523 HeapRegion* alloc_region = NULL; 524 if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) { 525 alloc_region = newAllocRegion_work(word_size, true, zero_filled); 526 if (purpose == GCAllocForSurvived && alloc_region != NULL) { 527 alloc_region->set_survivor(); 528 } 529 ++_gc_alloc_region_counts[purpose]; 530 } else { 531 g1_policy()->note_alloc_region_limit_reached(purpose); 532 } 533 return alloc_region; 534 } 535 536 // If could fit into free regions w/o expansion, try. 537 // Otherwise, if can expand, do so. 538 // Otherwise, if using ex regions might help, try with ex given back. 539 HeapWord* G1CollectedHeap::humongousObjAllocate(size_t word_size) { 540 assert(regions_accounted_for(), "Region leakage!"); 541 542 // We can't allocate H regions while cleanupComplete is running, since 543 // some of the regions we find to be empty might not yet be added to the 544 // unclean list. (If we're already at a safepoint, this call is 545 // unnecessary, not to mention wrong.) 546 if (!SafepointSynchronize::is_at_safepoint()) 547 wait_for_cleanup_complete(); 548 549 size_t num_regions = 550 round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords; 551 552 // Special case if < one region??? 553 554 // Remember the ft size. 555 size_t x_size = expansion_regions(); 556 557 HeapWord* res = NULL; 558 bool eliminated_allocated_from_lists = false; 559 560 // Can the allocation potentially fit in the free regions? 561 if (free_regions() >= num_regions) { 562 res = _hrs->obj_allocate(word_size); 563 } 564 if (res == NULL) { 565 // Try expansion. 566 size_t fs = _hrs->free_suffix(); 567 if (fs + x_size >= num_regions) { 568 expand((num_regions - fs) * HeapRegion::GrainBytes); 569 res = _hrs->obj_allocate(word_size); 570 assert(res != NULL, "This should have worked."); 571 } else { 572 // Expansion won't help. Are there enough free regions if we get rid 573 // of reservations? 
574 size_t avail = free_regions(); 575 if (avail >= num_regions) { 576 res = _hrs->obj_allocate(word_size); 577 if (res != NULL) { 578 remove_allocated_regions_from_lists(); 579 eliminated_allocated_from_lists = true; 580 } 581 } 582 } 583 } 584 if (res != NULL) { 585 // Increment by the number of regions allocated. 586 // FIXME: Assumes regions all of size GrainBytes. 587 #ifndef PRODUCT 588 mr_bs()->verify_clean_region(MemRegion(res, res + num_regions * 589 HeapRegion::GrainWords)); 590 #endif 591 if (!eliminated_allocated_from_lists) 592 remove_allocated_regions_from_lists(); 593 _summary_bytes_used += word_size * HeapWordSize; 594 _free_regions -= num_regions; 595 _num_humongous_regions += (int) num_regions; 596 } 597 assert(regions_accounted_for(), "Region Leakage"); 598 return res; 599 } 600 601 HeapWord* 602 G1CollectedHeap::attempt_allocation_slow(size_t word_size, 603 bool permit_collection_pause) { 604 HeapWord* res = NULL; 605 HeapRegion* allocated_young_region = NULL; 606 607 assert( SafepointSynchronize::is_at_safepoint() || 608 Heap_lock->owned_by_self(), "pre condition of the call" ); 609 610 if (isHumongous(word_size)) { 611 // Allocation of a humongous object can, in a sense, complete a 612 // partial region, if the previous alloc was also humongous, and 613 // caused the test below to succeed. 614 if (permit_collection_pause) 615 do_collection_pause_if_appropriate(word_size); 616 res = humongousObjAllocate(word_size); 617 assert(_cur_alloc_region == NULL 618 || !_cur_alloc_region->isHumongous(), 619 "Prevent a regression of this bug."); 620 621 } else { 622 // We may have concurrent cleanup working at the time. Wait for it 623 // to complete. In the future we would probably want to make the 624 // concurrent cleanup truly concurrent by decoupling it from the 625 // allocation. 626 if (!SafepointSynchronize::is_at_safepoint()) 627 wait_for_cleanup_complete(); 628 // If we do a collection pause, this will be reset to a non-NULL 629 // value. If we don't, nulling here ensures that we allocate a new 630 // region below. 631 if (_cur_alloc_region != NULL) { 632 // We're finished with the _cur_alloc_region. 633 // As we're builing (at least the young portion) of the collection 634 // set incrementally we'll add the current allocation region to 635 // the collection set here. 636 if (_cur_alloc_region->is_young()) { 637 g1_policy()->add_region_to_incremental_cset_lhs(_cur_alloc_region); 638 } 639 _summary_bytes_used += _cur_alloc_region->used(); 640 _cur_alloc_region = NULL; 641 } 642 assert(_cur_alloc_region == NULL, "Invariant."); 643 // Completion of a heap region is perhaps a good point at which to do 644 // a collection pause. 645 if (permit_collection_pause) 646 do_collection_pause_if_appropriate(word_size); 647 // Make sure we have an allocation region available. 648 if (_cur_alloc_region == NULL) { 649 if (!SafepointSynchronize::is_at_safepoint()) 650 wait_for_cleanup_complete(); 651 bool next_is_young = should_set_young_locked(); 652 // If the next region is not young, make sure it's zero-filled. 653 _cur_alloc_region = newAllocRegion(word_size, !next_is_young); 654 if (_cur_alloc_region != NULL) { 655 _summary_bytes_used -= _cur_alloc_region->used(); 656 if (next_is_young) { 657 set_region_short_lived_locked(_cur_alloc_region); 658 allocated_young_region = _cur_alloc_region; 659 } 660 } 661 } 662 assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(), 663 "Prevent a regression of this bug."); 664 665 // Now retry the allocation. 
666 if (_cur_alloc_region != NULL) { 667 if (allocated_young_region != NULL) { 668 // We need to ensure that the store to top does not 669 // float above the setting of the young type. 670 OrderAccess::storestore(); 671 } 672 res = _cur_alloc_region->allocate(word_size); 673 } 674 } 675 676 // NOTE: fails frequently in PRT 677 assert(regions_accounted_for(), "Region leakage!"); 678 679 if (res != NULL) { 680 if (!SafepointSynchronize::is_at_safepoint()) { 681 assert( permit_collection_pause, "invariant" ); 682 assert( Heap_lock->owned_by_self(), "invariant" ); 683 Heap_lock->unlock(); 684 } 685 686 if (allocated_young_region != NULL) { 687 HeapRegion* hr = allocated_young_region; 688 HeapWord* bottom = hr->bottom(); 689 HeapWord* end = hr->end(); 690 MemRegion mr(bottom, end); 691 ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr); 692 } 693 } 694 695 assert( SafepointSynchronize::is_at_safepoint() || 696 (res == NULL && Heap_lock->owned_by_self()) || 697 (res != NULL && !Heap_lock->owned_by_self()), 698 "post condition of the call" ); 699 700 return res; 701 } 702 703 HeapWord* 704 G1CollectedHeap::mem_allocate(size_t word_size, 705 bool is_noref, 706 bool is_tlab, 707 bool* gc_overhead_limit_was_exceeded) { 708 debug_only(check_for_valid_allocation_state()); 709 assert(no_gc_in_progress(), "Allocation during gc not allowed"); 710 HeapWord* result = NULL; 711 712 // Loop until the allocation is satisified, 713 // or unsatisfied after GC. 714 for (int try_count = 1; /* return or throw */; try_count += 1) { 715 int gc_count_before; 716 { 717 Heap_lock->lock(); 718 result = attempt_allocation(word_size); 719 if (result != NULL) { 720 // attempt_allocation should have unlocked the heap lock 721 assert(is_in(result), "result not in heap"); 722 return result; 723 } 724 // Read the gc count while the heap lock is held. 725 gc_count_before = SharedHeap::heap()->total_collections(); 726 Heap_lock->unlock(); 727 } 728 729 // Create the garbage collection operation... 730 VM_G1CollectForAllocation op(word_size, 731 gc_count_before); 732 733 // ...and get the VM thread to execute it. 734 VMThread::execute(&op); 735 if (op.prologue_succeeded()) { 736 result = op.result(); 737 assert(result == NULL || is_in(result), "result not in heap"); 738 return result; 739 } 740 741 // Give a warning if we seem to be looping forever. 742 if ((QueuedAllocationWarningCount > 0) && 743 (try_count % QueuedAllocationWarningCount == 0)) { 744 warning("G1CollectedHeap::mem_allocate_work retries %d times", 745 try_count); 746 } 747 } 748 } 749 750 void G1CollectedHeap::abandon_cur_alloc_region() { 751 if (_cur_alloc_region != NULL) { 752 // We're finished with the _cur_alloc_region. 753 if (_cur_alloc_region->is_empty()) { 754 _free_regions++; 755 free_region(_cur_alloc_region); 756 } else { 757 // As we're builing (at least the young portion) of the collection 758 // set incrementally we'll add the current allocation region to 759 // the collection set here. 760 if (_cur_alloc_region->is_young()) { 761 g1_policy()->add_region_to_incremental_cset_lhs(_cur_alloc_region); 762 } 763 _summary_bytes_used += _cur_alloc_region->used(); 764 } 765 _cur_alloc_region = NULL; 766 } 767 } 768 769 void G1CollectedHeap::abandon_gc_alloc_regions() { 770 // first, make sure that the GC alloc region list is empty (it should!) 
771 assert(_gc_alloc_region_list == NULL, "invariant"); 772 release_gc_alloc_regions(true /* totally */); 773 } 774 775 class PostMCRemSetClearClosure: public HeapRegionClosure { 776 ModRefBarrierSet* _mr_bs; 777 public: 778 PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} 779 bool doHeapRegion(HeapRegion* r) { 780 r->reset_gc_time_stamp(); 781 if (r->continuesHumongous()) 782 return false; 783 HeapRegionRemSet* hrrs = r->rem_set(); 784 if (hrrs != NULL) hrrs->clear(); 785 // You might think here that we could clear just the cards 786 // corresponding to the used region. But no: if we leave a dirty card 787 // in a region we might allocate into, then it would prevent that card 788 // from being enqueued, and cause it to be missed. 789 // Re: the performance cost: we shouldn't be doing full GC anyway! 790 _mr_bs->clear(MemRegion(r->bottom(), r->end())); 791 return false; 792 } 793 }; 794 795 796 class PostMCRemSetInvalidateClosure: public HeapRegionClosure { 797 ModRefBarrierSet* _mr_bs; 798 public: 799 PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {} 800 bool doHeapRegion(HeapRegion* r) { 801 if (r->continuesHumongous()) return false; 802 if (r->used_region().word_size() != 0) { 803 _mr_bs->invalidate(r->used_region(), true /*whole heap*/); 804 } 805 return false; 806 } 807 }; 808 809 class RebuildRSOutOfRegionClosure: public HeapRegionClosure { 810 G1CollectedHeap* _g1h; 811 UpdateRSOopClosure _cl; 812 int _worker_i; 813 public: 814 RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) : 815 _cl(g1->g1_rem_set(), worker_i), 816 _worker_i(worker_i), 817 _g1h(g1) 818 { } 819 bool doHeapRegion(HeapRegion* r) { 820 if (!r->continuesHumongous()) { 821 _cl.set_from(r); 822 r->oop_iterate(&_cl); 823 } 824 return false; 825 } 826 }; 827 828 class ParRebuildRSTask: public AbstractGangTask { 829 G1CollectedHeap* _g1; 830 public: 831 ParRebuildRSTask(G1CollectedHeap* g1) 832 : AbstractGangTask("ParRebuildRSTask"), 833 _g1(g1) 834 { } 835 836 void work(int i) { 837 RebuildRSOutOfRegionClosure rebuild_rs(_g1, i); 838 _g1->heap_region_par_iterate_chunked(&rebuild_rs, i, 839 HeapRegion::RebuildRSClaimValue); 840 } 841 }; 842 843 void G1CollectedHeap::do_collection(bool explicit_gc, 844 bool clear_all_soft_refs, 845 size_t word_size) { 846 if (GC_locker::check_active_before_gc()) { 847 return; // GC is disabled (e.g. JNI GetXXXCritical operation) 848 } 849 850 ResourceMark rm; 851 852 if (PrintHeapAtGC) { 853 Universe::print_heap_before_gc(); 854 } 855 856 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); 857 assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread"); 858 859 const bool do_clear_all_soft_refs = clear_all_soft_refs || 860 collector_policy()->should_clear_all_soft_refs(); 861 862 ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy()); 863 864 { 865 IsGCActiveMark x; 866 867 // Timing 868 bool system_gc = (gc_cause() == GCCause::_java_lang_system_gc); 869 assert(!system_gc || explicit_gc, "invariant"); 870 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); 871 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); 872 TraceTime t(system_gc ? 
"Full GC (System.gc())" : "Full GC", 873 PrintGC, true, gclog_or_tty); 874 875 TraceMemoryManagerStats tms(true /* fullGC */); 876 877 double start = os::elapsedTime(); 878 g1_policy()->record_full_collection_start(); 879 880 gc_prologue(true); 881 increment_total_collections(true /* full gc */); 882 883 size_t g1h_prev_used = used(); 884 assert(used() == recalculate_used(), "Should be equal"); 885 886 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { 887 HandleMark hm; // Discard invalid handles created during verification 888 prepare_for_verify(); 889 gclog_or_tty->print(" VerifyBeforeGC:"); 890 Universe::verify(true); 891 } 892 assert(regions_accounted_for(), "Region leakage!"); 893 894 COMPILER2_PRESENT(DerivedPointerTable::clear()); 895 896 // We want to discover references, but not process them yet. 897 // This mode is disabled in 898 // instanceRefKlass::process_discovered_references if the 899 // generation does some collection work, or 900 // instanceRefKlass::enqueue_discovered_references if the 901 // generation returns without doing any work. 902 ref_processor()->disable_discovery(); 903 ref_processor()->abandon_partial_discovery(); 904 ref_processor()->verify_no_references_recorded(); 905 906 // Abandon current iterations of concurrent marking and concurrent 907 // refinement, if any are in progress. 908 concurrent_mark()->abort(); 909 910 // Make sure we'll choose a new allocation region afterwards. 911 abandon_cur_alloc_region(); 912 abandon_gc_alloc_regions(); 913 assert(_cur_alloc_region == NULL, "Invariant."); 914 g1_rem_set()->cleanupHRRS(); 915 tear_down_region_lists(); 916 set_used_regions_to_need_zero_fill(); 917 918 // We may have added regions to the current incremental collection 919 // set between the last GC or pause and now. We need to clear the 920 // incremental collection set and then start rebuilding it afresh 921 // after this full GC. 922 abandon_collection_set(g1_policy()->inc_cset_head()); 923 g1_policy()->clear_incremental_cset(); 924 g1_policy()->stop_incremental_cset_building(); 925 926 if (g1_policy()->in_young_gc_mode()) { 927 empty_young_list(); 928 g1_policy()->set_full_young_gcs(true); 929 } 930 931 // Temporarily make reference _discovery_ single threaded (non-MT). 932 ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false); 933 934 // Temporarily make refs discovery atomic 935 ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true); 936 937 // Temporarily clear _is_alive_non_header 938 ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL); 939 940 ref_processor()->enable_discovery(); 941 ref_processor()->setup_policy(do_clear_all_soft_refs); 942 943 // Do collection work 944 { 945 HandleMark hm; // Discard invalid handles created during gc 946 G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs); 947 } 948 // Because freeing humongous regions may have added some unclean 949 // regions, it is necessary to tear down again before rebuilding. 
950 tear_down_region_lists(); 951 rebuild_region_lists(); 952 953 _summary_bytes_used = recalculate_used(); 954 955 ref_processor()->enqueue_discovered_references(); 956 957 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); 958 959 MemoryService::track_memory_usage(); 960 961 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { 962 HandleMark hm; // Discard invalid handles created during verification 963 gclog_or_tty->print(" VerifyAfterGC:"); 964 prepare_for_verify(); 965 Universe::verify(false); 966 } 967 NOT_PRODUCT(ref_processor()->verify_no_references_recorded()); 968 969 reset_gc_time_stamp(); 970 // Since everything potentially moved, we will clear all remembered 971 // sets, and clear all cards. Later we will rebuild remebered 972 // sets. We will also reset the GC time stamps of the regions. 973 PostMCRemSetClearClosure rs_clear(mr_bs()); 974 heap_region_iterate(&rs_clear); 975 976 // Resize the heap if necessary. 977 resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size); 978 979 if (_cg1r->use_cache()) { 980 _cg1r->clear_and_record_card_counts(); 981 _cg1r->clear_hot_cache(); 982 } 983 984 // Rebuild remembered sets of all regions. 985 986 if (G1CollectedHeap::use_parallel_gc_threads()) { 987 ParRebuildRSTask rebuild_rs_task(this); 988 assert(check_heap_region_claim_values( 989 HeapRegion::InitialClaimValue), "sanity check"); 990 set_par_threads(workers()->total_workers()); 991 workers()->run_task(&rebuild_rs_task); 992 set_par_threads(0); 993 assert(check_heap_region_claim_values( 994 HeapRegion::RebuildRSClaimValue), "sanity check"); 995 reset_heap_region_claim_values(); 996 } else { 997 RebuildRSOutOfRegionClosure rebuild_rs(this); 998 heap_region_iterate(&rebuild_rs); 999 } 1000 1001 if (PrintGC) { 1002 print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity()); 1003 } 1004 1005 if (true) { // FIXME 1006 // Ask the permanent generation to adjust size for full collections 1007 perm()->compute_new_size(); 1008 } 1009 1010 // Start a new incremental collection set for the next pause 1011 assert(g1_policy()->collection_set() == NULL, "must be"); 1012 g1_policy()->start_incremental_cset_building(); 1013 1014 // Clear the _cset_fast_test bitmap in anticipation of adding 1015 // regions to the incremental collection set for the next 1016 // evacuation pause. 1017 clear_cset_fast_test(); 1018 1019 double end = os::elapsedTime(); 1020 g1_policy()->record_full_collection_end(); 1021 1022 #ifdef TRACESPINNING 1023 ParallelTaskTerminator::print_termination_counts(); 1024 #endif 1025 1026 gc_epilogue(true); 1027 1028 // Discard all rset updates 1029 JavaThread::dirty_card_queue_set().abandon_logs(); 1030 assert(!G1DeferredRSUpdate 1031 || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any"); 1032 assert(regions_accounted_for(), "Region leakage!"); 1033 } 1034 1035 if (g1_policy()->in_young_gc_mode()) { 1036 _young_list->reset_sampled_info(); 1037 // At this point there should be no regions in the 1038 // entire heap tagged as young. 1039 assert( check_young_list_empty(true /* check_heap */), 1040 "young list should be empty at this point"); 1041 } 1042 1043 // Update the number of full collections that have been completed. 
1044 increment_full_collections_completed(false /* outer */); 1045 1046 if (PrintHeapAtGC) { 1047 Universe::print_heap_after_gc(); 1048 } 1049 } 1050 1051 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) { 1052 do_collection(true, /* explicit_gc */ 1053 clear_all_soft_refs, 1054 0 /* word_size */); 1055 } 1056 1057 // This code is mostly copied from TenuredGeneration. 1058 void 1059 G1CollectedHeap:: 1060 resize_if_necessary_after_full_collection(size_t word_size) { 1061 assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check"); 1062 1063 // Include the current allocation, if any, and bytes that will be 1064 // pre-allocated to support collections, as "used". 1065 const size_t used_after_gc = used(); 1066 const size_t capacity_after_gc = capacity(); 1067 const size_t free_after_gc = capacity_after_gc - used_after_gc; 1068 1069 // This is enforced in arguments.cpp. 1070 assert(MinHeapFreeRatio <= MaxHeapFreeRatio, 1071 "otherwise the code below doesn't make sense"); 1072 1073 // We don't have floating point command-line arguments 1074 const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0; 1075 const double maximum_used_percentage = 1.0 - minimum_free_percentage; 1076 const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0; 1077 const double minimum_used_percentage = 1.0 - maximum_free_percentage; 1078 1079 const size_t min_heap_size = collector_policy()->min_heap_byte_size(); 1080 const size_t max_heap_size = collector_policy()->max_heap_byte_size(); 1081 1082 // We have to be careful here as these two calculations can overflow 1083 // 32-bit size_t's. 1084 double used_after_gc_d = (double) used_after_gc; 1085 double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage; 1086 double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage; 1087 1088 // Let's make sure that they are both under the max heap size, which 1089 // by default will make them fit into a size_t. 1090 double desired_capacity_upper_bound = (double) max_heap_size; 1091 minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d, 1092 desired_capacity_upper_bound); 1093 maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d, 1094 desired_capacity_upper_bound); 1095 1096 // We can now safely turn them into size_t's. 1097 size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d; 1098 size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d; 1099 1100 // This assert only makes sense here, before we adjust them 1101 // with respect to the min and max heap size. 1102 assert(minimum_desired_capacity <= maximum_desired_capacity, 1103 err_msg("minimum_desired_capacity = "SIZE_FORMAT", " 1104 "maximum_desired_capacity = "SIZE_FORMAT, 1105 minimum_desired_capacity, maximum_desired_capacity)); 1106 1107 // Should not be greater than the heap max size. No need to adjust 1108 // it with respect to the heap min size as it's a lower bound (i.e., 1109 // we'll try to make the capacity larger than it, not smaller). 1110 minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size); 1111 // Should not be less than the heap min size. No need to adjust it 1112 // with respect to the heap max size as it's an upper bound (i.e., 1113 // we'll try to make the capacity smaller than it, not greater). 
1114 maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size); 1115 1116 if (PrintGC && Verbose) { 1117 const double free_percentage = 1118 (double) free_after_gc / (double) capacity_after_gc; 1119 gclog_or_tty->print_cr("Computing new size after full GC "); 1120 gclog_or_tty->print_cr(" " 1121 " minimum_free_percentage: %6.2f", 1122 minimum_free_percentage); 1123 gclog_or_tty->print_cr(" " 1124 " maximum_free_percentage: %6.2f", 1125 maximum_free_percentage); 1126 gclog_or_tty->print_cr(" " 1127 " capacity: %6.1fK" 1128 " minimum_desired_capacity: %6.1fK" 1129 " maximum_desired_capacity: %6.1fK", 1130 (double) capacity_after_gc / (double) K, 1131 (double) minimum_desired_capacity / (double) K, 1132 (double) maximum_desired_capacity / (double) K); 1133 gclog_or_tty->print_cr(" " 1134 " free_after_gc: %6.1fK" 1135 " used_after_gc: %6.1fK", 1136 (double) free_after_gc / (double) K, 1137 (double) used_after_gc / (double) K); 1138 gclog_or_tty->print_cr(" " 1139 " free_percentage: %6.2f", 1140 free_percentage); 1141 } 1142 if (capacity_after_gc < minimum_desired_capacity) { 1143 // Don't expand unless it's significant 1144 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc; 1145 expand(expand_bytes); 1146 if (PrintGC && Verbose) { 1147 gclog_or_tty->print_cr(" " 1148 " expanding:" 1149 " max_heap_size: %6.1fK" 1150 " minimum_desired_capacity: %6.1fK" 1151 " expand_bytes: %6.1fK", 1152 (double) max_heap_size / (double) K, 1153 (double) minimum_desired_capacity / (double) K, 1154 (double) expand_bytes / (double) K); 1155 } 1156 1157 // No expansion, now see if we want to shrink 1158 } else if (capacity_after_gc > maximum_desired_capacity) { 1159 // Capacity too large, compute shrinking size 1160 size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity; 1161 shrink(shrink_bytes); 1162 if (PrintGC && Verbose) { 1163 gclog_or_tty->print_cr(" " 1164 " shrinking:" 1165 " min_heap_size: %6.1fK" 1166 " maximum_desired_capacity: %6.1fK" 1167 " shrink_bytes: %6.1fK", 1168 (double) min_heap_size / (double) K, 1169 (double) maximum_desired_capacity / (double) K, 1170 (double) shrink_bytes / (double) K); 1171 } 1172 } 1173 } 1174 1175 1176 HeapWord* 1177 G1CollectedHeap::satisfy_failed_allocation(size_t word_size) { 1178 HeapWord* result = NULL; 1179 1180 // In a G1 heap, we're supposed to keep allocation from failing by 1181 // incremental pauses. Therefore, at least for now, we'll favor 1182 // expansion over collection. (This might change in the future if we can 1183 // do something smarter than full collection to satisfy a failed alloc.) 1184 1185 result = expand_and_allocate(word_size); 1186 if (result != NULL) { 1187 assert(is_in(result), "result not in heap"); 1188 return result; 1189 } 1190 1191 // OK, I guess we have to try collection. 1192 1193 do_collection(false, false, word_size); 1194 1195 result = attempt_allocation(word_size, /*permit_collection_pause*/false); 1196 1197 if (result != NULL) { 1198 assert(is_in(result), "result not in heap"); 1199 return result; 1200 } 1201 1202 // Try collecting soft references. 1203 do_collection(false, true, word_size); 1204 result = attempt_allocation(word_size, /*permit_collection_pause*/false); 1205 if (result != NULL) { 1206 assert(is_in(result), "result not in heap"); 1207 return result; 1208 } 1209 1210 assert(!collector_policy()->should_clear_all_soft_refs(), 1211 "Flag should have been handled and cleared prior to this point"); 1212 1213 // What else? We might try synchronous finalization later. 
If the total 1214 // space available is large enough for the allocation, then a more 1215 // complete compaction phase than we've tried so far might be 1216 // appropriate. 1217 return NULL; 1218 } 1219 1220 // Attempting to expand the heap sufficiently 1221 // to support an allocation of the given "word_size". If 1222 // successful, perform the allocation and return the address of the 1223 // allocated block, or else "NULL". 1224 1225 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) { 1226 size_t expand_bytes = word_size * HeapWordSize; 1227 if (expand_bytes < MinHeapDeltaBytes) { 1228 expand_bytes = MinHeapDeltaBytes; 1229 } 1230 expand(expand_bytes); 1231 assert(regions_accounted_for(), "Region leakage!"); 1232 HeapWord* result = attempt_allocation(word_size, false /* permit_collection_pause */); 1233 return result; 1234 } 1235 1236 size_t G1CollectedHeap::free_region_if_totally_empty(HeapRegion* hr) { 1237 size_t pre_used = 0; 1238 size_t cleared_h_regions = 0; 1239 size_t freed_regions = 0; 1240 UncleanRegionList local_list; 1241 free_region_if_totally_empty_work(hr, pre_used, cleared_h_regions, 1242 freed_regions, &local_list); 1243 1244 finish_free_region_work(pre_used, cleared_h_regions, freed_regions, 1245 &local_list); 1246 return pre_used; 1247 } 1248 1249 void 1250 G1CollectedHeap::free_region_if_totally_empty_work(HeapRegion* hr, 1251 size_t& pre_used, 1252 size_t& cleared_h, 1253 size_t& freed_regions, 1254 UncleanRegionList* list, 1255 bool par) { 1256 assert(!hr->continuesHumongous(), "should have filtered these out"); 1257 size_t res = 0; 1258 if (hr->used() > 0 && hr->garbage_bytes() == hr->used() && 1259 !hr->is_young()) { 1260 if (G1PolicyVerbose > 0) 1261 gclog_or_tty->print_cr("Freeing empty region "PTR_FORMAT "(" SIZE_FORMAT " bytes)" 1262 " during cleanup", hr, hr->used()); 1263 free_region_work(hr, pre_used, cleared_h, freed_regions, list, par); 1264 } 1265 } 1266 1267 // FIXME: both this and shrink could probably be more efficient by 1268 // doing one "VirtualSpace::expand_by" call rather than several. 1269 void G1CollectedHeap::expand(size_t expand_bytes) { 1270 size_t old_mem_size = _g1_storage.committed_size(); 1271 // We expand by a minimum of 1K. 1272 expand_bytes = MAX2(expand_bytes, (size_t)K); 1273 size_t aligned_expand_bytes = 1274 ReservedSpace::page_align_size_up(expand_bytes); 1275 aligned_expand_bytes = align_size_up(aligned_expand_bytes, 1276 HeapRegion::GrainBytes); 1277 expand_bytes = aligned_expand_bytes; 1278 while (expand_bytes > 0) { 1279 HeapWord* base = (HeapWord*)_g1_storage.high(); 1280 // Commit more storage. 1281 bool successful = _g1_storage.expand_by(HeapRegion::GrainBytes); 1282 if (!successful) { 1283 expand_bytes = 0; 1284 } else { 1285 expand_bytes -= HeapRegion::GrainBytes; 1286 // Expand the committed region. 1287 HeapWord* high = (HeapWord*) _g1_storage.high(); 1288 _g1_committed.set_end(high); 1289 // Create a new HeapRegion. 1290 MemRegion mr(base, high); 1291 bool is_zeroed = !_g1_max_committed.contains(base); 1292 HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed); 1293 1294 // Now update max_committed if necessary. 1295 _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), high)); 1296 1297 // Add it to the HeapRegionSeq. 1298 _hrs->insert(hr); 1299 // Set the zero-fill state, according to whether it's already 1300 // zeroed. 
1301 { 1302 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); 1303 if (is_zeroed) { 1304 hr->set_zero_fill_complete(); 1305 put_free_region_on_list_locked(hr); 1306 } else { 1307 hr->set_zero_fill_needed(); 1308 put_region_on_unclean_list_locked(hr); 1309 } 1310 } 1311 _free_regions++; 1312 // And we used up an expansion region to create it. 1313 _expansion_regions--; 1314 // Tell the cardtable about it. 1315 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); 1316 // And the offset table as well. 1317 _bot_shared->resize(_g1_committed.word_size()); 1318 } 1319 } 1320 if (Verbose && PrintGC) { 1321 size_t new_mem_size = _g1_storage.committed_size(); 1322 gclog_or_tty->print_cr("Expanding garbage-first heap from %ldK by %ldK to %ldK", 1323 old_mem_size/K, aligned_expand_bytes/K, 1324 new_mem_size/K); 1325 } 1326 } 1327 1328 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) 1329 { 1330 size_t old_mem_size = _g1_storage.committed_size(); 1331 size_t aligned_shrink_bytes = 1332 ReservedSpace::page_align_size_down(shrink_bytes); 1333 aligned_shrink_bytes = align_size_down(aligned_shrink_bytes, 1334 HeapRegion::GrainBytes); 1335 size_t num_regions_deleted = 0; 1336 MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted); 1337 1338 assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!"); 1339 if (mr.byte_size() > 0) 1340 _g1_storage.shrink_by(mr.byte_size()); 1341 assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!"); 1342 1343 _g1_committed.set_end(mr.start()); 1344 _free_regions -= num_regions_deleted; 1345 _expansion_regions += num_regions_deleted; 1346 1347 // Tell the cardtable about it. 1348 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); 1349 1350 // And the offset table as well. 1351 _bot_shared->resize(_g1_committed.word_size()); 1352 1353 HeapRegionRemSet::shrink_heap(n_regions()); 1354 1355 if (Verbose && PrintGC) { 1356 size_t new_mem_size = _g1_storage.committed_size(); 1357 gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK", 1358 old_mem_size/K, aligned_shrink_bytes/K, 1359 new_mem_size/K); 1360 } 1361 } 1362 1363 void G1CollectedHeap::shrink(size_t shrink_bytes) { 1364 release_gc_alloc_regions(true /* totally */); 1365 tear_down_region_lists(); // We will rebuild them in a moment. 1366 shrink_helper(shrink_bytes); 1367 rebuild_region_lists(); 1368 } 1369 1370 // Public methods. 
1371 1372 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away 1373 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list 1374 #endif // _MSC_VER 1375 1376 1377 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) : 1378 SharedHeap(policy_), 1379 _g1_policy(policy_), 1380 _dirty_card_queue_set(false), 1381 _into_cset_dirty_card_queue_set(false), 1382 _ref_processor(NULL), 1383 _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)), 1384 _bot_shared(NULL), 1385 _par_alloc_during_gc_lock(Mutex::leaf, "par alloc during GC lock"), 1386 _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL), 1387 _evac_failure_scan_stack(NULL) , 1388 _mark_in_progress(false), 1389 _cg1r(NULL), _czft(NULL), _summary_bytes_used(0), 1390 _cur_alloc_region(NULL), 1391 _refine_cte_cl(NULL), 1392 _free_region_list(NULL), _free_region_list_size(0), 1393 _free_regions(0), 1394 _full_collection(false), 1395 _unclean_region_list(), 1396 _unclean_regions_coming(false), 1397 _young_list(new YoungList(this)), 1398 _gc_time_stamp(0), 1399 _surviving_young_words(NULL), 1400 _full_collections_completed(0), 1401 _in_cset_fast_test(NULL), 1402 _in_cset_fast_test_base(NULL), 1403 _dirty_cards_region_list(NULL) { 1404 _g1h = this; // To catch bugs. 1405 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) { 1406 vm_exit_during_initialization("Failed necessary allocation."); 1407 } 1408 1409 _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2; 1410 1411 int n_queues = MAX2((int)ParallelGCThreads, 1); 1412 _task_queues = new RefToScanQueueSet(n_queues); 1413 1414 int n_rem_sets = HeapRegionRemSet::num_par_rem_sets(); 1415 assert(n_rem_sets > 0, "Invariant."); 1416 1417 HeapRegionRemSetIterator** iter_arr = 1418 NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues); 1419 for (int i = 0; i < n_queues; i++) { 1420 iter_arr[i] = new HeapRegionRemSetIterator(); 1421 } 1422 _rem_set_iterator = iter_arr; 1423 1424 for (int i = 0; i < n_queues; i++) { 1425 RefToScanQueue* q = new RefToScanQueue(); 1426 q->initialize(); 1427 _task_queues->register_queue(i, q); 1428 } 1429 1430 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { 1431 _gc_alloc_regions[ap] = NULL; 1432 _gc_alloc_region_counts[ap] = 0; 1433 _retained_gc_alloc_regions[ap] = NULL; 1434 // by default, we do not retain a GC alloc region for each ap; 1435 // we'll override this, when appropriate, below 1436 _retain_gc_alloc_region[ap] = false; 1437 } 1438 1439 // We will try to remember the last half-full tenured region we 1440 // allocated to at the end of a collection so that we can re-use it 1441 // during the next collection. 1442 _retain_gc_alloc_region[GCAllocForTenured] = true; 1443 1444 guarantee(_task_queues != NULL, "task_queues allocation failure."); 1445 } 1446 1447 jint G1CollectedHeap::initialize() { 1448 CollectedHeap::pre_initialize(); 1449 os::enable_vtime(); 1450 1451 // Necessary to satisfy locking discipline assertions. 1452 1453 MutexLocker x(Heap_lock); 1454 1455 // While there are no constraints in the GC code that HeapWordSize 1456 // be any particular value, there are multiple other areas in the 1457 // system which believe this to be true (e.g. oop->object_size in some 1458 // cases incorrectly returns the size in wordSize units rather than 1459 // HeapWordSize). 
1460 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize"); 1461 1462 size_t init_byte_size = collector_policy()->initial_heap_byte_size(); 1463 size_t max_byte_size = collector_policy()->max_heap_byte_size(); 1464 1465 // Ensure that the sizes are properly aligned. 1466 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap"); 1467 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap"); 1468 1469 _cg1r = new ConcurrentG1Refine(); 1470 1471 // Reserve the maximum. 1472 PermanentGenerationSpec* pgs = collector_policy()->permanent_generation(); 1473 // Includes the perm-gen. 1474 1475 const size_t total_reserved = max_byte_size + pgs->max_size(); 1476 char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop); 1477 1478 ReservedSpace heap_rs(max_byte_size + pgs->max_size(), 1479 HeapRegion::GrainBytes, 1480 false /*ism*/, addr); 1481 1482 if (UseCompressedOops) { 1483 if (addr != NULL && !heap_rs.is_reserved()) { 1484 // Failed to reserve at specified address - the requested memory 1485 // region is taken already, for example, by 'java' launcher. 1486 // Try again to reserver heap higher. 1487 addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop); 1488 ReservedSpace heap_rs0(total_reserved, HeapRegion::GrainBytes, 1489 false /*ism*/, addr); 1490 if (addr != NULL && !heap_rs0.is_reserved()) { 1491 // Failed to reserve at specified address again - give up. 1492 addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop); 1493 assert(addr == NULL, ""); 1494 ReservedSpace heap_rs1(total_reserved, HeapRegion::GrainBytes, 1495 false /*ism*/, addr); 1496 heap_rs = heap_rs1; 1497 } else { 1498 heap_rs = heap_rs0; 1499 } 1500 } 1501 } 1502 1503 if (!heap_rs.is_reserved()) { 1504 vm_exit_during_initialization("Could not reserve enough space for object heap"); 1505 return JNI_ENOMEM; 1506 } 1507 1508 // It is important to do this in a way such that concurrent readers can't 1509 // temporarily think somethings in the heap. (I've actually seen this 1510 // happen in asserts: DLD.) 1511 _reserved.set_word_size(0); 1512 _reserved.set_start((HeapWord*)heap_rs.base()); 1513 _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size())); 1514 1515 _expansion_regions = max_byte_size/HeapRegion::GrainBytes; 1516 1517 _num_humongous_regions = 0; 1518 1519 // Create the gen rem set (and barrier set) for the entire reserved region. 1520 _rem_set = collector_policy()->create_rem_set(_reserved, 2); 1521 set_barrier_set(rem_set()->bs()); 1522 if (barrier_set()->is_a(BarrierSet::ModRef)) { 1523 _mr_bs = (ModRefBarrierSet*)_barrier_set; 1524 } else { 1525 vm_exit_during_initialization("G1 requires a mod ref bs."); 1526 return JNI_ENOMEM; 1527 } 1528 1529 // Also create a G1 rem set. 1530 if (mr_bs()->is_a(BarrierSet::CardTableModRef)) { 1531 _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs()); 1532 } else { 1533 vm_exit_during_initialization("G1 requires a cardtable mod ref bs."); 1534 return JNI_ENOMEM; 1535 } 1536 1537 // Carve out the G1 part of the heap. 
1538 1539 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size); 1540 _g1_reserved = MemRegion((HeapWord*)g1_rs.base(), 1541 g1_rs.size()/HeapWordSize); 1542 ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size); 1543 1544 _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set()); 1545 1546 _g1_storage.initialize(g1_rs, 0); 1547 _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0); 1548 _g1_max_committed = _g1_committed; 1549 _hrs = new HeapRegionSeq(_expansion_regions); 1550 guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq"); 1551 guarantee(_cur_alloc_region == NULL, "from constructor"); 1552 1553 // 6843694 - ensure that the maximum region index can fit 1554 // in the remembered set structures. 1555 const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1; 1556 guarantee((max_regions() - 1) <= max_region_idx, "too many regions"); 1557 1558 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1; 1559 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized"); 1560 guarantee((size_t) HeapRegion::CardsPerRegion < max_cards_per_region, 1561 "too many cards per region"); 1562 1563 _bot_shared = new G1BlockOffsetSharedArray(_reserved, 1564 heap_word_size(init_byte_size)); 1565 1566 _g1h = this; 1567 1568 _in_cset_fast_test_length = max_regions(); 1569 _in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length); 1570 1571 // We're biasing _in_cset_fast_test to avoid subtracting the 1572 // beginning of the heap every time we want to index; basically 1573 // it's the same with what we do with the card table. 1574 _in_cset_fast_test = _in_cset_fast_test_base - 1575 ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes); 1576 1577 // Clear the _cset_fast_test bitmap in anticipation of adding 1578 // regions to the incremental collection set for the first 1579 // evacuation pause. 1580 clear_cset_fast_test(); 1581 1582 // Create the ConcurrentMark data structure and thread. 1583 // (Must do this late, so that "max_regions" is defined.) 1584 _cm = new ConcurrentMark(heap_rs, (int) max_regions()); 1585 _cmThread = _cm->cmThread(); 1586 1587 // ...and the concurrent zero-fill thread, if necessary. 1588 if (G1ConcZeroFill) { 1589 _czft = new ConcurrentZFThread(); 1590 } 1591 1592 // Initialize the from_card cache structure of HeapRegionRemSet. 1593 HeapRegionRemSet::init_heap(max_regions()); 1594 1595 // Now expand into the initial heap size. 1596 expand(init_byte_size); 1597 1598 // Perform any initialization actions delegated to the policy. 
1599 g1_policy()->init(); 1600 1601 g1_policy()->note_start_of_mark_thread(); 1602 1603 _refine_cte_cl = 1604 new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(), 1605 g1_rem_set(), 1606 concurrent_g1_refine()); 1607 JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl); 1608 1609 JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon, 1610 SATB_Q_FL_lock, 1611 G1SATBProcessCompletedThreshold, 1612 Shared_SATB_Q_lock); 1613 1614 JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, 1615 DirtyCardQ_FL_lock, 1616 concurrent_g1_refine()->yellow_zone(), 1617 concurrent_g1_refine()->red_zone(), 1618 Shared_DirtyCardQ_lock); 1619 1620 if (G1DeferredRSUpdate) { 1621 dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, 1622 DirtyCardQ_FL_lock, 1623 -1, // never trigger processing 1624 -1, // no limit on length 1625 Shared_DirtyCardQ_lock, 1626 &JavaThread::dirty_card_queue_set()); 1627 } 1628 1629 // Initialize the card queue set used to hold cards containing 1630 // references into the collection set. 1631 _into_cset_dirty_card_queue_set.initialize(DirtyCardQ_CBL_mon, 1632 DirtyCardQ_FL_lock, 1633 -1, // never trigger processing 1634 -1, // no limit on length 1635 Shared_DirtyCardQ_lock, 1636 &JavaThread::dirty_card_queue_set()); 1637 1638 // In case we're keeping closure specialization stats, initialize those 1639 // counts and that mechanism. 1640 SpecializationStats::clear(); 1641 1642 _gc_alloc_region_list = NULL; 1643 1644 // Do later initialization work for concurrent refinement. 1645 _cg1r->init(); 1646 1647 return JNI_OK; 1648 } 1649 1650 void G1CollectedHeap::ref_processing_init() { 1651 SharedHeap::ref_processing_init(); 1652 MemRegion mr = reserved_region(); 1653 _ref_processor = ReferenceProcessor::create_ref_processor( 1654 mr, // span 1655 false, // Reference discovery is not atomic 1656 // (though it shouldn't matter here.) 1657 true, // mt_discovery 1658 NULL, // is alive closure: need to fill this in for efficiency 1659 ParallelGCThreads, 1660 ParallelRefProcEnabled, 1661 true); // Setting next fields of discovered 1662 // lists requires a barrier. 1663 } 1664 1665 size_t G1CollectedHeap::capacity() const { 1666 return _g1_committed.byte_size(); 1667 } 1668 1669 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl, 1670 DirtyCardQueue* into_cset_dcq, 1671 bool concurrent, 1672 int worker_i) { 1673 // Clean cards in the hot card cache 1674 concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set(), into_cset_dcq); 1675 1676 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); 1677 int n_completed_buffers = 0; 1678 while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) { 1679 n_completed_buffers++; 1680 } 1681 g1_policy()->record_update_rs_processed_buffers(worker_i, 1682 (double) n_completed_buffers); 1683 dcqs.clear_n_completed_buffers(); 1684 assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!"); 1685 } 1686 1687 1688 // Computes the sum of the storage used by the various regions. 
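// An informal sketch of the accounting that follows (not normative):
//
//   used()          == _summary_bytes_used
//                      + (_cur_alloc_region != NULL ? _cur_alloc_region->used() : 0)
//   used_unlocked() == _summary_bytes_used only
//   recalculate_used() walks every non-"continues humongous" region and
//                      sums HeapRegion::used() directly.
//
// used() asserts that the Heap_lock is held on the caller's behalf;
// _cur_alloc_region may otherwise change under the reader, which is why it
// is read only once into a local below.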
1689 1690 size_t G1CollectedHeap::used() const { 1691 assert(Heap_lock->owner() != NULL, 1692 "Should be owned on this thread's behalf."); 1693 size_t result = _summary_bytes_used; 1694 // Read only once in case it is set to NULL concurrently 1695 HeapRegion* hr = _cur_alloc_region; 1696 if (hr != NULL) 1697 result += hr->used(); 1698 return result; 1699 } 1700 1701 size_t G1CollectedHeap::used_unlocked() const { 1702 size_t result = _summary_bytes_used; 1703 return result; 1704 } 1705 1706 class SumUsedClosure: public HeapRegionClosure { 1707 size_t _used; 1708 public: 1709 SumUsedClosure() : _used(0) {} 1710 bool doHeapRegion(HeapRegion* r) { 1711 if (!r->continuesHumongous()) { 1712 _used += r->used(); 1713 } 1714 return false; 1715 } 1716 size_t result() { return _used; } 1717 }; 1718 1719 size_t G1CollectedHeap::recalculate_used() const { 1720 SumUsedClosure blk; 1721 _hrs->iterate(&blk); 1722 return blk.result(); 1723 } 1724 1725 #ifndef PRODUCT 1726 class SumUsedRegionsClosure: public HeapRegionClosure { 1727 size_t _num; 1728 public: 1729 SumUsedRegionsClosure() : _num(0) {} 1730 bool doHeapRegion(HeapRegion* r) { 1731 if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) { 1732 _num += 1; 1733 } 1734 return false; 1735 } 1736 size_t result() { return _num; } 1737 }; 1738 1739 size_t G1CollectedHeap::recalculate_used_regions() const { 1740 SumUsedRegionsClosure blk; 1741 _hrs->iterate(&blk); 1742 return blk.result(); 1743 } 1744 #endif // PRODUCT 1745 1746 size_t G1CollectedHeap::unsafe_max_alloc() { 1747 if (_free_regions > 0) return HeapRegion::GrainBytes; 1748 // otherwise, is there space in the current allocation region? 1749 1750 // We need to store the current allocation region in a local variable 1751 // here. The problem is that this method doesn't take any locks and 1752 // there may be other threads which overwrite the current allocation 1753 // region field. attempt_allocation(), for example, sets it to NULL 1754 // and this can happen *after* the NULL check here but before the call 1755 // to free(), resulting in a SIGSEGV. Note that this doesn't appear 1756 // to be a problem in the optimized build, since the two loads of the 1757 // current allocation region field are optimized away. 1758 HeapRegion* car = _cur_alloc_region; 1759 1760 // FIXME: should iterate over all regions? 1761 if (car == NULL) { 1762 return 0; 1763 } 1764 return car->free(); 1765 } 1766 1767 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) { 1768 return 1769 ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) || 1770 (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)); 1771 } 1772 1773 void G1CollectedHeap::increment_full_collections_completed(bool outer) { 1774 MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag); 1775 1776 // We have already incremented _total_full_collections at the start 1777 // of the GC, so total_full_collections() represents how many full 1778 // collections have been started. 1779 unsigned int full_collections_started = total_full_collections(); 1780 1781 // Given that this method is called at the end of a Full GC or of a 1782 // concurrent cycle, and those can be nested (i.e., a Full GC can 1783 // interrupt a concurrent cycle), the number of full collections 1784 // completed should be either one (in the case where there was no 1785 // nesting) or two (when a Full GC interrupted a concurrent cycle) 1786 // behind the number of full collections started. 
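//
// A worked example with made-up numbers: suppose 5 full collections have
// been started and _full_collections_completed is 3, because a Full GC
// interrupted an in-progress concurrent cycle. When the Full GC finishes
// (the inner caller), started == completed + 2 holds and completed becomes 4;
// when the interrupted concurrent cycle later reaches its own epilogue (the
// outer caller), started == completed + 1 holds and completed becomes 5.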
1787
1788 // This is the case for the inner caller, i.e. a Full GC.
1789 assert(outer ||
1790 (full_collections_started == _full_collections_completed + 1) ||
1791 (full_collections_started == _full_collections_completed + 2),
1792 err_msg("for inner caller: full_collections_started = %u "
1793 "is inconsistent with _full_collections_completed = %u",
1794 full_collections_started, _full_collections_completed));
1795
1796 // This is the case for the outer caller, i.e. the concurrent cycle.
1797 assert(!outer ||
1798 (full_collections_started == _full_collections_completed + 1),
1799 err_msg("for outer caller: full_collections_started = %u "
1800 "is inconsistent with _full_collections_completed = %u",
1801 full_collections_started, _full_collections_completed));
1802
1803 _full_collections_completed += 1;
1804
1805 // We need to clear the "in_progress" flag in the CM thread before
1806 // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
1807 // is set) so that if a waiter requests another System.gc() it doesn't
1808 // incorrectly see that a marking cycle is still in progress.
1809 if (outer) {
1810 _cmThread->clear_in_progress();
1811 }
1812
1813 // This notify_all() will ensure that a thread that called
1814 // System.gc() (with ExplicitGCInvokesConcurrent set or not)
1815 // and is waiting for a full GC to finish will be woken up. It is
1816 // waiting in VM_G1IncCollectionPause::doit_epilogue().
1817 FullGCCount_lock->notify_all();
1818 }
1819
1820 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
1821 assert(Thread::current()->is_VM_thread(), "Precondition#1");
1822 assert(Heap_lock->is_locked(), "Precondition#2");
1823 GCCauseSetter gcs(this, cause);
1824 switch (cause) {
1825 case GCCause::_heap_inspection:
1826 case GCCause::_heap_dump: {
1827 HandleMark hm;
1828 do_full_collection(false); // don't clear all soft refs
1829 break;
1830 }
1831 default: // XXX FIX ME
1832 ShouldNotReachHere(); // Unexpected use of this function
1833 }
1834 }
1835
1836 void G1CollectedHeap::collect(GCCause::Cause cause) {
1837 // The caller doesn't have the Heap_lock
1838 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
1839
1840 unsigned int gc_count_before;
1841 unsigned int full_gc_count_before;
1842 {
1843 MutexLocker ml(Heap_lock);
1844 // Read the GC count while holding the Heap_lock
1845 gc_count_before = SharedHeap::heap()->total_collections();
1846 full_gc_count_before = SharedHeap::heap()->total_full_collections();
1847
1848 // Don't want to do a GC until cleanup is completed.
1849 wait_for_cleanup_complete();
1850
1851 // We give up the heap lock; VMThread::execute gets it back below
1852 }
1853
1854 if (should_do_concurrent_full_gc(cause)) {
1855 // Schedule an initial-mark evacuation pause that will start a
1856 // concurrent cycle.
1857 VM_G1IncCollectionPause op(gc_count_before,
1858 true, /* should_initiate_conc_mark */
1859 g1_policy()->max_pause_time_ms(),
1860 cause);
1861 VMThread::execute(&op);
1862 } else {
1863 if (cause == GCCause::_gc_locker
1864 DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
1865
1866 // Schedule a standard evacuation pause.
1867 VM_G1IncCollectionPause op(gc_count_before,
1868 false, /* should_initiate_conc_mark */
1869 g1_policy()->max_pause_time_ms(),
1870 cause);
1871 VMThread::execute(&op);
1872 } else {
1873 // Schedule a Full GC.
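// (This is the fall-back path: the cause neither asked for a concurrent
// cycle via should_do_concurrent_full_gc() nor is it a GC-locker- or
// scavenge-alot-induced pause, so a stop-the-world full collection is
// scheduled instead.)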
1874 VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause); 1875 VMThread::execute(&op); 1876 } 1877 } 1878 } 1879 1880 bool G1CollectedHeap::is_in(const void* p) const { 1881 if (_g1_committed.contains(p)) { 1882 HeapRegion* hr = _hrs->addr_to_region(p); 1883 return hr->is_in(p); 1884 } else { 1885 return _perm_gen->as_gen()->is_in(p); 1886 } 1887 } 1888 1889 // Iteration functions. 1890 1891 // Iterates an OopClosure over all ref-containing fields of objects 1892 // within a HeapRegion. 1893 1894 class IterateOopClosureRegionClosure: public HeapRegionClosure { 1895 MemRegion _mr; 1896 OopClosure* _cl; 1897 public: 1898 IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl) 1899 : _mr(mr), _cl(cl) {} 1900 bool doHeapRegion(HeapRegion* r) { 1901 if (! r->continuesHumongous()) { 1902 r->oop_iterate(_cl); 1903 } 1904 return false; 1905 } 1906 }; 1907 1908 void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) { 1909 IterateOopClosureRegionClosure blk(_g1_committed, cl); 1910 _hrs->iterate(&blk); 1911 if (do_perm) { 1912 perm_gen()->oop_iterate(cl); 1913 } 1914 } 1915 1916 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) { 1917 IterateOopClosureRegionClosure blk(mr, cl); 1918 _hrs->iterate(&blk); 1919 if (do_perm) { 1920 perm_gen()->oop_iterate(cl); 1921 } 1922 } 1923 1924 // Iterates an ObjectClosure over all objects within a HeapRegion. 1925 1926 class IterateObjectClosureRegionClosure: public HeapRegionClosure { 1927 ObjectClosure* _cl; 1928 public: 1929 IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {} 1930 bool doHeapRegion(HeapRegion* r) { 1931 if (! r->continuesHumongous()) { 1932 r->object_iterate(_cl); 1933 } 1934 return false; 1935 } 1936 }; 1937 1938 void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) { 1939 IterateObjectClosureRegionClosure blk(cl); 1940 _hrs->iterate(&blk); 1941 if (do_perm) { 1942 perm_gen()->object_iterate(cl); 1943 } 1944 } 1945 1946 void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) { 1947 // FIXME: is this right? 1948 guarantee(false, "object_iterate_since_last_GC not supported by G1 heap"); 1949 } 1950 1951 // Calls a SpaceClosure on a HeapRegion. 1952 1953 class SpaceClosureRegionClosure: public HeapRegionClosure { 1954 SpaceClosure* _cl; 1955 public: 1956 SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {} 1957 bool doHeapRegion(HeapRegion* r) { 1958 _cl->do_space(r); 1959 return false; 1960 } 1961 }; 1962 1963 void G1CollectedHeap::space_iterate(SpaceClosure* cl) { 1964 SpaceClosureRegionClosure blk(cl); 1965 _hrs->iterate(&blk); 1966 } 1967 1968 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) { 1969 _hrs->iterate(cl); 1970 } 1971 1972 void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r, 1973 HeapRegionClosure* cl) { 1974 _hrs->iterate_from(r, cl); 1975 } 1976 1977 void 1978 G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) { 1979 _hrs->iterate_from(idx, cl); 1980 } 1981 1982 HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); } 1983 1984 void 1985 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl, 1986 int worker, 1987 jint claim_value) { 1988 const size_t regions = n_regions(); 1989 const size_t worker_num = (G1CollectedHeap::use_parallel_gc_threads() ? 
ParallelGCThreads : 1);
1990 // try to spread out the starting points of the workers
1991 const size_t start_index = regions / worker_num * (size_t) worker;
1992
1993 // each worker will actually look at all regions
1994 for (size_t count = 0; count < regions; ++count) {
1995 const size_t index = (start_index + count) % regions;
1996 assert(0 <= index && index < regions, "sanity");
1997 HeapRegion* r = region_at(index);
1998 // we'll ignore "continues humongous" regions (we'll process them
1999 // when we come across their corresponding "start humongous"
2000 // region) and regions already claimed
2001 if (r->claim_value() == claim_value || r->continuesHumongous()) {
2002 continue;
2003 }
2004 // OK, try to claim it
2005 if (r->claimHeapRegion(claim_value)) {
2006 // success!
2007 assert(!r->continuesHumongous(), "sanity");
2008 if (r->startsHumongous()) {
2009 // If the region is "starts humongous" we'll iterate over its
2010 // "continues humongous" regions first; in fact we'll do them
2011 // first. The order is important: in one case, calling the
2012 // closure on the "starts humongous" region might de-allocate
2013 // and clear all its "continues humongous" regions and, as a
2014 // result, we might end up processing them twice. So, we'll do
2015 // them first (notice: most closures will ignore them anyway) and
2016 // then we'll do the "starts humongous" region.
2017 for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) {
2018 HeapRegion* chr = region_at(ch_index);
2019
2020 // if the region has already been claimed or it's not
2021 // "continues humongous" we're done
2022 if (chr->claim_value() == claim_value ||
2023 !chr->continuesHumongous()) {
2024 break;
2025 }
2026
2027 // No one should have claimed it directly. We can assert that,
2028 // given that we claimed its "starts humongous" region.
2029 assert(chr->claim_value() != claim_value, "sanity");
2030 assert(chr->humongous_start_region() == r, "sanity");
2031
2032 if (chr->claimHeapRegion(claim_value)) {
2033 // we should always be able to claim it; no one else should
2034 // be trying to claim this region
2035
2036 bool res2 = cl->doHeapRegion(chr);
2037 assert(!res2, "Should not abort");
2038
2039 // Right now, this holds (i.e., no closure that actually
2040 // does something with "continues humongous" regions
2041 // clears them). We might have to weaken it in the future,
2042 // but let's leave these two asserts here for extra safety.
2043 assert(chr->continuesHumongous(), "should still be the case");
2044 assert(chr->humongous_start_region() == r, "sanity");
2045 } else {
2046 guarantee(false, "we should not reach here");
2047 }
2048 }
2049 }
2050
2051 assert(!r->continuesHumongous(), "sanity");
2052 bool res = cl->doHeapRegion(r);
2053 assert(!res, "Should not abort");
2054 }
2055 }
2056 }
2057
2058 class ResetClaimValuesClosure: public HeapRegionClosure {
2059 public:
2060 bool doHeapRegion(HeapRegion* r) {
2061 r->set_claim_value(HeapRegion::InitialClaimValue);
2062 return false;
2063 }
2064 };
2065
2066 void
2067 G1CollectedHeap::reset_heap_region_claim_values() {
2068 ResetClaimValuesClosure blk;
2069 heap_region_iterate(&blk);
2070 }
2071
2072 #ifdef ASSERT
2073 // This checks whether all regions in the heap have the correct claim
2074 // value. I also piggy-backed on this a check to ensure that the
2075 // humongous_start_region() information on "continues humongous"
2076 // regions is correct.
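//
// A typical (debug-only) usage pattern, as in verify() further below:
//
//   assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), "pre");
//   ... run a parallel task that claims regions with ParVerifyClaimValue ...
//   assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue), "post");
//   reset_heap_region_claim_values();
//   assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), "reset");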
2077 2078 class CheckClaimValuesClosure : public HeapRegionClosure { 2079 private: 2080 jint _claim_value; 2081 size_t _failures; 2082 HeapRegion* _sh_region; 2083 public: 2084 CheckClaimValuesClosure(jint claim_value) : 2085 _claim_value(claim_value), _failures(0), _sh_region(NULL) { } 2086 bool doHeapRegion(HeapRegion* r) { 2087 if (r->claim_value() != _claim_value) { 2088 gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), " 2089 "claim value = %d, should be %d", 2090 r->bottom(), r->end(), r->claim_value(), 2091 _claim_value); 2092 ++_failures; 2093 } 2094 if (!r->isHumongous()) { 2095 _sh_region = NULL; 2096 } else if (r->startsHumongous()) { 2097 _sh_region = r; 2098 } else if (r->continuesHumongous()) { 2099 if (r->humongous_start_region() != _sh_region) { 2100 gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), " 2101 "HS = "PTR_FORMAT", should be "PTR_FORMAT, 2102 r->bottom(), r->end(), 2103 r->humongous_start_region(), 2104 _sh_region); 2105 ++_failures; 2106 } 2107 } 2108 return false; 2109 } 2110 size_t failures() { 2111 return _failures; 2112 } 2113 }; 2114 2115 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) { 2116 CheckClaimValuesClosure cl(claim_value); 2117 heap_region_iterate(&cl); 2118 return cl.failures() == 0; 2119 } 2120 #endif // ASSERT 2121 2122 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) { 2123 HeapRegion* r = g1_policy()->collection_set(); 2124 while (r != NULL) { 2125 HeapRegion* next = r->next_in_collection_set(); 2126 if (cl->doHeapRegion(r)) { 2127 cl->incomplete(); 2128 return; 2129 } 2130 r = next; 2131 } 2132 } 2133 2134 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r, 2135 HeapRegionClosure *cl) { 2136 if (r == NULL) { 2137 // The CSet is empty so there's nothing to do. 2138 return; 2139 } 2140 2141 assert(r->in_collection_set(), 2142 "Start region must be a member of the collection set."); 2143 HeapRegion* cur = r; 2144 while (cur != NULL) { 2145 HeapRegion* next = cur->next_in_collection_set(); 2146 if (cl->doHeapRegion(cur) && false) { 2147 cl->incomplete(); 2148 return; 2149 } 2150 cur = next; 2151 } 2152 cur = g1_policy()->collection_set(); 2153 while (cur != r) { 2154 HeapRegion* next = cur->next_in_collection_set(); 2155 if (cl->doHeapRegion(cur) && false) { 2156 cl->incomplete(); 2157 return; 2158 } 2159 cur = next; 2160 } 2161 } 2162 2163 CompactibleSpace* G1CollectedHeap::first_compactible_space() { 2164 return _hrs->length() > 0 ? 
_hrs->at(0) : NULL; 2165 } 2166 2167 2168 Space* G1CollectedHeap::space_containing(const void* addr) const { 2169 Space* res = heap_region_containing(addr); 2170 if (res == NULL) 2171 res = perm_gen()->space_containing(addr); 2172 return res; 2173 } 2174 2175 HeapWord* G1CollectedHeap::block_start(const void* addr) const { 2176 Space* sp = space_containing(addr); 2177 if (sp != NULL) { 2178 return sp->block_start(addr); 2179 } 2180 return NULL; 2181 } 2182 2183 size_t G1CollectedHeap::block_size(const HeapWord* addr) const { 2184 Space* sp = space_containing(addr); 2185 assert(sp != NULL, "block_size of address outside of heap"); 2186 return sp->block_size(addr); 2187 } 2188 2189 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const { 2190 Space* sp = space_containing(addr); 2191 return sp->block_is_obj(addr); 2192 } 2193 2194 bool G1CollectedHeap::supports_tlab_allocation() const { 2195 return true; 2196 } 2197 2198 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const { 2199 return HeapRegion::GrainBytes; 2200 } 2201 2202 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const { 2203 // Return the remaining space in the cur alloc region, but not less than 2204 // the min TLAB size. 2205 2206 // Also, this value can be at most the humongous object threshold, 2207 // since we can't allow tlabs to grow big enough to accomodate 2208 // humongous objects. 2209 2210 // We need to store the cur alloc region locally, since it might change 2211 // between when we test for NULL and when we use it later. 2212 ContiguousSpace* cur_alloc_space = _cur_alloc_region; 2213 size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize; 2214 2215 if (cur_alloc_space == NULL) { 2216 return max_tlab_size; 2217 } else { 2218 return MIN2(MAX2(cur_alloc_space->free(), (size_t)MinTLABSize), 2219 max_tlab_size); 2220 } 2221 } 2222 2223 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) { 2224 assert(!isHumongous(word_size), 2225 err_msg("a TLAB should not be of humongous size, " 2226 "word_size = "SIZE_FORMAT, word_size)); 2227 bool dummy; 2228 return G1CollectedHeap::mem_allocate(word_size, false, true, &dummy); 2229 } 2230 2231 bool G1CollectedHeap::allocs_are_zero_filled() { 2232 return false; 2233 } 2234 2235 size_t G1CollectedHeap::large_typearray_limit() { 2236 // FIXME 2237 return HeapRegion::GrainBytes/HeapWordSize; 2238 } 2239 2240 size_t G1CollectedHeap::max_capacity() const { 2241 return g1_reserved_obj_bytes(); 2242 } 2243 2244 jlong G1CollectedHeap::millis_since_last_gc() { 2245 // assert(false, "NYI"); 2246 return 0; 2247 } 2248 2249 2250 void G1CollectedHeap::prepare_for_verify() { 2251 if (SafepointSynchronize::is_at_safepoint() || ! 
UseTLAB) { 2252 ensure_parsability(false); 2253 } 2254 g1_rem_set()->prepare_for_verify(); 2255 } 2256 2257 class VerifyLivenessOopClosure: public OopClosure { 2258 G1CollectedHeap* g1h; 2259 public: 2260 VerifyLivenessOopClosure(G1CollectedHeap* _g1h) { 2261 g1h = _g1h; 2262 } 2263 void do_oop(narrowOop *p) { do_oop_work(p); } 2264 void do_oop( oop *p) { do_oop_work(p); } 2265 2266 template <class T> void do_oop_work(T *p) { 2267 oop obj = oopDesc::load_decode_heap_oop(p); 2268 guarantee(obj == NULL || !g1h->is_obj_dead(obj), 2269 "Dead object referenced by a not dead object"); 2270 } 2271 }; 2272 2273 class VerifyObjsInRegionClosure: public ObjectClosure { 2274 private: 2275 G1CollectedHeap* _g1h; 2276 size_t _live_bytes; 2277 HeapRegion *_hr; 2278 bool _use_prev_marking; 2279 public: 2280 // use_prev_marking == true -> use "prev" marking information, 2281 // use_prev_marking == false -> use "next" marking information 2282 VerifyObjsInRegionClosure(HeapRegion *hr, bool use_prev_marking) 2283 : _live_bytes(0), _hr(hr), _use_prev_marking(use_prev_marking) { 2284 _g1h = G1CollectedHeap::heap(); 2285 } 2286 void do_object(oop o) { 2287 VerifyLivenessOopClosure isLive(_g1h); 2288 assert(o != NULL, "Huh?"); 2289 if (!_g1h->is_obj_dead_cond(o, _use_prev_marking)) { 2290 o->oop_iterate(&isLive); 2291 if (!_hr->obj_allocated_since_prev_marking(o)) { 2292 size_t obj_size = o->size(); // Make sure we don't overflow 2293 _live_bytes += (obj_size * HeapWordSize); 2294 } 2295 } 2296 } 2297 size_t live_bytes() { return _live_bytes; } 2298 }; 2299 2300 class PrintObjsInRegionClosure : public ObjectClosure { 2301 HeapRegion *_hr; 2302 G1CollectedHeap *_g1; 2303 public: 2304 PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) { 2305 _g1 = G1CollectedHeap::heap(); 2306 }; 2307 2308 void do_object(oop o) { 2309 if (o != NULL) { 2310 HeapWord *start = (HeapWord *) o; 2311 size_t word_sz = o->size(); 2312 gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT 2313 " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n", 2314 (void*) o, word_sz, 2315 _g1->isMarkedPrev(o), 2316 _g1->isMarkedNext(o), 2317 _hr->obj_allocated_since_prev_marking(o)); 2318 HeapWord *end = start + word_sz; 2319 HeapWord *cur; 2320 int *val; 2321 for (cur = start; cur < end; cur++) { 2322 val = (int *) cur; 2323 gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val); 2324 } 2325 } 2326 } 2327 }; 2328 2329 class VerifyRegionClosure: public HeapRegionClosure { 2330 private: 2331 bool _allow_dirty; 2332 bool _par; 2333 bool _use_prev_marking; 2334 bool _failures; 2335 public: 2336 // use_prev_marking == true -> use "prev" marking information, 2337 // use_prev_marking == false -> use "next" marking information 2338 VerifyRegionClosure(bool allow_dirty, bool par, bool use_prev_marking) 2339 : _allow_dirty(allow_dirty), 2340 _par(par), 2341 _use_prev_marking(use_prev_marking), 2342 _failures(false) {} 2343 2344 bool failures() { 2345 return _failures; 2346 } 2347 2348 bool doHeapRegion(HeapRegion* r) { 2349 guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue, 2350 "Should be unclaimed at verify points."); 2351 if (!r->continuesHumongous()) { 2352 bool failures = false; 2353 r->verify(_allow_dirty, _use_prev_marking, &failures); 2354 if (failures) { 2355 _failures = true; 2356 } else { 2357 VerifyObjsInRegionClosure not_dead_yet_cl(r, _use_prev_marking); 2358 r->object_iterate(¬_dead_yet_cl); 2359 if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) { 2360 
gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] " 2361 "max_live_bytes "SIZE_FORMAT" " 2362 "< calculated "SIZE_FORMAT, 2363 r->bottom(), r->end(), 2364 r->max_live_bytes(), 2365 not_dead_yet_cl.live_bytes()); 2366 _failures = true; 2367 } 2368 } 2369 } 2370 return false; // stop the region iteration if we hit a failure 2371 } 2372 }; 2373 2374 class VerifyRootsClosure: public OopsInGenClosure { 2375 private: 2376 G1CollectedHeap* _g1h; 2377 bool _use_prev_marking; 2378 bool _failures; 2379 public: 2380 // use_prev_marking == true -> use "prev" marking information, 2381 // use_prev_marking == false -> use "next" marking information 2382 VerifyRootsClosure(bool use_prev_marking) : 2383 _g1h(G1CollectedHeap::heap()), 2384 _use_prev_marking(use_prev_marking), 2385 _failures(false) { } 2386 2387 bool failures() { return _failures; } 2388 2389 template <class T> void do_oop_nv(T* p) { 2390 T heap_oop = oopDesc::load_heap_oop(p); 2391 if (!oopDesc::is_null(heap_oop)) { 2392 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); 2393 if (_g1h->is_obj_dead_cond(obj, _use_prev_marking)) { 2394 gclog_or_tty->print_cr("Root location "PTR_FORMAT" " 2395 "points to dead obj "PTR_FORMAT, p, (void*) obj); 2396 obj->print_on(gclog_or_tty); 2397 _failures = true; 2398 } 2399 } 2400 } 2401 2402 void do_oop(oop* p) { do_oop_nv(p); } 2403 void do_oop(narrowOop* p) { do_oop_nv(p); } 2404 }; 2405 2406 // This is the task used for parallel heap verification. 2407 2408 class G1ParVerifyTask: public AbstractGangTask { 2409 private: 2410 G1CollectedHeap* _g1h; 2411 bool _allow_dirty; 2412 bool _use_prev_marking; 2413 bool _failures; 2414 2415 public: 2416 // use_prev_marking == true -> use "prev" marking information, 2417 // use_prev_marking == false -> use "next" marking information 2418 G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty, 2419 bool use_prev_marking) : 2420 AbstractGangTask("Parallel verify task"), 2421 _g1h(g1h), 2422 _allow_dirty(allow_dirty), 2423 _use_prev_marking(use_prev_marking), 2424 _failures(false) { } 2425 2426 bool failures() { 2427 return _failures; 2428 } 2429 2430 void work(int worker_i) { 2431 HandleMark hm; 2432 VerifyRegionClosure blk(_allow_dirty, true, _use_prev_marking); 2433 _g1h->heap_region_par_iterate_chunked(&blk, worker_i, 2434 HeapRegion::ParVerifyClaimValue); 2435 if (blk.failures()) { 2436 _failures = true; 2437 } 2438 } 2439 }; 2440 2441 void G1CollectedHeap::verify(bool allow_dirty, bool silent) { 2442 verify(allow_dirty, silent, /* use_prev_marking */ true); 2443 } 2444 2445 void G1CollectedHeap::verify(bool allow_dirty, 2446 bool silent, 2447 bool use_prev_marking) { 2448 if (SafepointSynchronize::is_at_safepoint() || ! 
UseTLAB) { 2449 if (!silent) { gclog_or_tty->print("roots "); } 2450 VerifyRootsClosure rootsCl(use_prev_marking); 2451 CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false); 2452 process_strong_roots(true, // activate StrongRootsScope 2453 false, 2454 SharedHeap::SO_AllClasses, 2455 &rootsCl, 2456 &blobsCl, 2457 &rootsCl); 2458 bool failures = rootsCl.failures(); 2459 rem_set()->invalidate(perm_gen()->used_region(), false); 2460 if (!silent) { gclog_or_tty->print("heapRegions "); } 2461 if (GCParallelVerificationEnabled && ParallelGCThreads > 1) { 2462 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), 2463 "sanity check"); 2464 2465 G1ParVerifyTask task(this, allow_dirty, use_prev_marking); 2466 int n_workers = workers()->total_workers(); 2467 set_par_threads(n_workers); 2468 workers()->run_task(&task); 2469 set_par_threads(0); 2470 if (task.failures()) { 2471 failures = true; 2472 } 2473 2474 assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue), 2475 "sanity check"); 2476 2477 reset_heap_region_claim_values(); 2478 2479 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), 2480 "sanity check"); 2481 } else { 2482 VerifyRegionClosure blk(allow_dirty, false, use_prev_marking); 2483 _hrs->iterate(&blk); 2484 if (blk.failures()) { 2485 failures = true; 2486 } 2487 } 2488 if (!silent) gclog_or_tty->print("remset "); 2489 rem_set()->verify(); 2490 2491 if (failures) { 2492 gclog_or_tty->print_cr("Heap:"); 2493 print_on(gclog_or_tty, true /* extended */); 2494 gclog_or_tty->print_cr(""); 2495 #ifndef PRODUCT 2496 if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) { 2497 concurrent_mark()->print_reachable("at-verification-failure", 2498 use_prev_marking, false /* all */); 2499 } 2500 #endif 2501 gclog_or_tty->flush(); 2502 } 2503 guarantee(!failures, "there should not have been any failures"); 2504 } else { 2505 if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) "); 2506 } 2507 } 2508 2509 class PrintRegionClosure: public HeapRegionClosure { 2510 outputStream* _st; 2511 public: 2512 PrintRegionClosure(outputStream* st) : _st(st) {} 2513 bool doHeapRegion(HeapRegion* r) { 2514 r->print_on(_st); 2515 return false; 2516 } 2517 }; 2518 2519 void G1CollectedHeap::print() const { print_on(tty); } 2520 2521 void G1CollectedHeap::print_on(outputStream* st) const { 2522 print_on(st, PrintHeapAtGCExtended); 2523 } 2524 2525 void G1CollectedHeap::print_on(outputStream* st, bool extended) const { 2526 st->print(" %-20s", "garbage-first heap"); 2527 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K", 2528 capacity()/K, used_unlocked()/K); 2529 st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")", 2530 _g1_storage.low_boundary(), 2531 _g1_storage.high(), 2532 _g1_storage.high_boundary()); 2533 st->cr(); 2534 st->print(" region size " SIZE_FORMAT "K, ", 2535 HeapRegion::GrainBytes/K); 2536 size_t young_regions = _young_list->length(); 2537 st->print(SIZE_FORMAT " young (" SIZE_FORMAT "K), ", 2538 young_regions, young_regions * HeapRegion::GrainBytes / K); 2539 size_t survivor_regions = g1_policy()->recorded_survivor_regions(); 2540 st->print(SIZE_FORMAT " survivors (" SIZE_FORMAT "K)", 2541 survivor_regions, survivor_regions * HeapRegion::GrainBytes / K); 2542 st->cr(); 2543 perm()->as_gen()->print_on(st); 2544 if (extended) { 2545 st->cr(); 2546 print_on_extended(st); 2547 } 2548 } 2549 2550 void G1CollectedHeap::print_on_extended(outputStream* st) const { 2551 PrintRegionClosure blk(st); 2552 
_hrs->iterate(&blk); 2553 } 2554 2555 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const { 2556 if (G1CollectedHeap::use_parallel_gc_threads()) { 2557 workers()->print_worker_threads_on(st); 2558 } 2559 2560 _cmThread->print_on(st); 2561 st->cr(); 2562 2563 _cm->print_worker_threads_on(st); 2564 2565 _cg1r->print_worker_threads_on(st); 2566 2567 _czft->print_on(st); 2568 st->cr(); 2569 } 2570 2571 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const { 2572 if (G1CollectedHeap::use_parallel_gc_threads()) { 2573 workers()->threads_do(tc); 2574 } 2575 tc->do_thread(_cmThread); 2576 _cg1r->threads_do(tc); 2577 tc->do_thread(_czft); 2578 } 2579 2580 void G1CollectedHeap::print_tracing_info() const { 2581 // We'll overload this to mean "trace GC pause statistics." 2582 if (TraceGen0Time || TraceGen1Time) { 2583 // The "G1CollectorPolicy" is keeping track of these stats, so delegate 2584 // to that. 2585 g1_policy()->print_tracing_info(); 2586 } 2587 if (G1SummarizeRSetStats) { 2588 g1_rem_set()->print_summary_info(); 2589 } 2590 if (G1SummarizeConcMark) { 2591 concurrent_mark()->print_summary_info(); 2592 } 2593 if (G1SummarizeZFStats) { 2594 ConcurrentZFThread::print_summary_info(); 2595 } 2596 g1_policy()->print_yg_surv_rate_info(); 2597 2598 SpecializationStats::print(); 2599 } 2600 2601 2602 int G1CollectedHeap::addr_to_arena_id(void* addr) const { 2603 HeapRegion* hr = heap_region_containing(addr); 2604 if (hr == NULL) { 2605 return 0; 2606 } else { 2607 return 1; 2608 } 2609 } 2610 2611 G1CollectedHeap* G1CollectedHeap::heap() { 2612 assert(_sh->kind() == CollectedHeap::G1CollectedHeap, 2613 "not a garbage-first heap"); 2614 return _g1h; 2615 } 2616 2617 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) { 2618 // always_do_update_barrier = false; 2619 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); 2620 // Call allocation profiler 2621 AllocationProfiler::iterate_since_last_gc(); 2622 // Fill TLAB's and such 2623 ensure_parsability(true); 2624 } 2625 2626 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) { 2627 // FIXME: what is this about? 2628 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled" 2629 // is set. 2630 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), 2631 "derived pointer present")); 2632 // always_do_update_barrier = true; 2633 } 2634 2635 void G1CollectedHeap::do_collection_pause() { 2636 assert(Heap_lock->owned_by_self(), "we assume we'reholding the Heap_lock"); 2637 2638 // Read the GC count while holding the Heap_lock 2639 // we need to do this _before_ wait_for_cleanup_complete(), to 2640 // ensure that we do not give up the heap lock and potentially 2641 // pick up the wrong count 2642 unsigned int gc_count_before = SharedHeap::heap()->total_collections(); 2643 2644 // Don't want to do a GC pause while cleanup is being completed! 
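// (Reading gc_count_before while still holding the Heap_lock, and only then
// waiting and releasing the lock, follows the usual VM-operation pattern:
// if another pause slips in before our VM_G1IncCollectionPause runs, the
// operation can notice the stale count and skip the redundant pause. This
// is an informal reading of the code, not an added guarantee.)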
2645 wait_for_cleanup_complete(); 2646 2647 g1_policy()->record_stop_world_start(); 2648 { 2649 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back 2650 VM_G1IncCollectionPause op(gc_count_before, 2651 false, /* should_initiate_conc_mark */ 2652 g1_policy()->max_pause_time_ms(), 2653 GCCause::_g1_inc_collection_pause); 2654 VMThread::execute(&op); 2655 } 2656 } 2657 2658 void 2659 G1CollectedHeap::doConcurrentMark() { 2660 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); 2661 if (!_cmThread->in_progress()) { 2662 _cmThread->set_started(); 2663 CGC_lock->notify(); 2664 } 2665 } 2666 2667 class VerifyMarkedObjsClosure: public ObjectClosure { 2668 G1CollectedHeap* _g1h; 2669 public: 2670 VerifyMarkedObjsClosure(G1CollectedHeap* g1h) : _g1h(g1h) {} 2671 void do_object(oop obj) { 2672 assert(obj->mark()->is_marked() ? !_g1h->is_obj_dead(obj) : true, 2673 "markandsweep mark should agree with concurrent deadness"); 2674 } 2675 }; 2676 2677 void 2678 G1CollectedHeap::checkConcurrentMark() { 2679 VerifyMarkedObjsClosure verifycl(this); 2680 // MutexLockerEx x(getMarkBitMapLock(), 2681 // Mutex::_no_safepoint_check_flag); 2682 object_iterate(&verifycl, false); 2683 } 2684 2685 void G1CollectedHeap::do_sync_mark() { 2686 _cm->checkpointRootsInitial(); 2687 _cm->markFromRoots(); 2688 _cm->checkpointRootsFinal(false); 2689 } 2690 2691 // <NEW PREDICTION> 2692 2693 double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr, 2694 bool young) { 2695 return _g1_policy->predict_region_elapsed_time_ms(hr, young); 2696 } 2697 2698 void G1CollectedHeap::check_if_region_is_too_expensive(double 2699 predicted_time_ms) { 2700 _g1_policy->check_if_region_is_too_expensive(predicted_time_ms); 2701 } 2702 2703 size_t G1CollectedHeap::pending_card_num() { 2704 size_t extra_cards = 0; 2705 JavaThread *curr = Threads::first(); 2706 while (curr != NULL) { 2707 DirtyCardQueue& dcq = curr->dirty_card_queue(); 2708 extra_cards += dcq.size(); 2709 curr = curr->next(); 2710 } 2711 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); 2712 size_t buffer_size = dcqs.buffer_size(); 2713 size_t buffer_num = dcqs.completed_buffers_num(); 2714 return buffer_size * buffer_num + extra_cards; 2715 } 2716 2717 size_t G1CollectedHeap::max_pending_card_num() { 2718 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); 2719 size_t buffer_size = dcqs.buffer_size(); 2720 size_t buffer_num = dcqs.completed_buffers_num(); 2721 int thread_num = Threads::number_of_threads(); 2722 return (buffer_num + thread_num) * buffer_size; 2723 } 2724 2725 size_t G1CollectedHeap::cards_scanned() { 2726 return g1_rem_set()->cardsScanned(); 2727 } 2728 2729 void 2730 G1CollectedHeap::setup_surviving_young_words() { 2731 guarantee( _surviving_young_words == NULL, "pre-condition" ); 2732 size_t array_length = g1_policy()->young_cset_length(); 2733 _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length); 2734 if (_surviving_young_words == NULL) { 2735 vm_exit_out_of_memory(sizeof(size_t) * array_length, 2736 "Not enough space for young surv words summary."); 2737 } 2738 memset(_surviving_young_words, 0, array_length * sizeof(size_t)); 2739 #ifdef ASSERT 2740 for (size_t i = 0; i < array_length; ++i) { 2741 assert( _surviving_young_words[i] == 0, "memset above" ); 2742 } 2743 #endif // !ASSERT 2744 } 2745 2746 void 2747 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) { 2748 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); 2749 size_t array_length = 
g1_policy()->young_cset_length(); 2750 for (size_t i = 0; i < array_length; ++i) 2751 _surviving_young_words[i] += surv_young_words[i]; 2752 } 2753 2754 void 2755 G1CollectedHeap::cleanup_surviving_young_words() { 2756 guarantee( _surviving_young_words != NULL, "pre-condition" ); 2757 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words); 2758 _surviving_young_words = NULL; 2759 } 2760 2761 // </NEW PREDICTION> 2762 2763 struct PrepareForRSScanningClosure : public HeapRegionClosure { 2764 bool doHeapRegion(HeapRegion *r) { 2765 r->rem_set()->set_iter_claimed(0); 2766 return false; 2767 } 2768 }; 2769 2770 #if TASKQUEUE_STATS 2771 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) { 2772 st->print_raw_cr("GC Task Stats"); 2773 st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr(); 2774 st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr(); 2775 } 2776 2777 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const { 2778 print_taskqueue_stats_hdr(st); 2779 2780 TaskQueueStats totals; 2781 const int n = workers() != NULL ? workers()->total_workers() : 1; 2782 for (int i = 0; i < n; ++i) { 2783 st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr(); 2784 totals += task_queue(i)->stats; 2785 } 2786 st->print_raw("tot "); totals.print(st); st->cr(); 2787 2788 DEBUG_ONLY(totals.verify()); 2789 } 2790 2791 void G1CollectedHeap::reset_taskqueue_stats() { 2792 const int n = workers() != NULL ? workers()->total_workers() : 1; 2793 for (int i = 0; i < n; ++i) { 2794 task_queue(i)->stats.reset(); 2795 } 2796 } 2797 #endif // TASKQUEUE_STATS 2798 2799 void 2800 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) { 2801 if (GC_locker::check_active_before_gc()) { 2802 return; // GC is disabled (e.g. JNI GetXXXCritical operation) 2803 } 2804 2805 if (PrintHeapAtGC) { 2806 Universe::print_heap_before_gc(); 2807 } 2808 2809 { 2810 ResourceMark rm; 2811 2812 // This call will decide whether this pause is an initial-mark 2813 // pause. If it is, during_initial_mark_pause() will return true 2814 // for the duration of this pause. 2815 g1_policy()->decide_on_conc_mark_initiation(); 2816 2817 char verbose_str[128]; 2818 sprintf(verbose_str, "GC pause "); 2819 if (g1_policy()->in_young_gc_mode()) { 2820 if (g1_policy()->full_young_gcs()) 2821 strcat(verbose_str, "(young)"); 2822 else 2823 strcat(verbose_str, "(partial)"); 2824 } 2825 if (g1_policy()->during_initial_mark_pause()) { 2826 strcat(verbose_str, " (initial-mark)"); 2827 // We are about to start a marking cycle, so we increment the 2828 // full collection counter. 2829 increment_total_full_collections(); 2830 } 2831 2832 // if PrintGCDetails is on, we'll print long statistics information 2833 // in the collector policy code, so let's not print this as the output 2834 // is messy if we do. 
2835 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); 2836 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); 2837 TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty); 2838 2839 TraceMemoryManagerStats tms(false /* fullGC */); 2840 2841 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); 2842 assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread"); 2843 guarantee(!is_gc_active(), "collection is not reentrant"); 2844 assert(regions_accounted_for(), "Region leakage!"); 2845 2846 increment_gc_time_stamp(); 2847 2848 if (g1_policy()->in_young_gc_mode()) { 2849 assert(check_young_list_well_formed(), 2850 "young list should be well formed"); 2851 } 2852 2853 { // Call to jvmpi::post_class_unload_events must occur outside of active GC 2854 IsGCActiveMark x; 2855 2856 gc_prologue(false); 2857 increment_total_collections(false /* full gc */); 2858 2859 #if G1_REM_SET_LOGGING 2860 gclog_or_tty->print_cr("\nJust chose CS, heap:"); 2861 print(); 2862 #endif 2863 2864 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { 2865 HandleMark hm; // Discard invalid handles created during verification 2866 prepare_for_verify(); 2867 gclog_or_tty->print(" VerifyBeforeGC:"); 2868 Universe::verify(false); 2869 } 2870 2871 COMPILER2_PRESENT(DerivedPointerTable::clear()); 2872 2873 // We want to turn off ref discovery, if necessary, and turn it back on 2874 // on again later if we do. XXX Dubious: why is discovery disabled? 2875 bool was_enabled = ref_processor()->discovery_enabled(); 2876 if (was_enabled) ref_processor()->disable_discovery(); 2877 2878 // Forget the current alloc region (we might even choose it to be part 2879 // of the collection set!). 2880 abandon_cur_alloc_region(); 2881 2882 // The elapsed time induced by the start time below deliberately elides 2883 // the possible verification above. 2884 double start_time_sec = os::elapsedTime(); 2885 size_t start_used_bytes = used(); 2886 2887 #if YOUNG_LIST_VERBOSE 2888 gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:"); 2889 _young_list->print(); 2890 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); 2891 #endif // YOUNG_LIST_VERBOSE 2892 2893 g1_policy()->record_collection_pause_start(start_time_sec, 2894 start_used_bytes); 2895 2896 #if YOUNG_LIST_VERBOSE 2897 gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:"); 2898 _young_list->print(); 2899 #endif // YOUNG_LIST_VERBOSE 2900 2901 if (g1_policy()->during_initial_mark_pause()) { 2902 concurrent_mark()->checkpointRootsInitialPre(); 2903 } 2904 save_marks(); 2905 2906 // We must do this before any possible evacuation that should propagate 2907 // marks. 2908 if (mark_in_progress()) { 2909 double start_time_sec = os::elapsedTime(); 2910 2911 _cm->drainAllSATBBuffers(); 2912 double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0; 2913 g1_policy()->record_satb_drain_time(finish_mark_ms); 2914 } 2915 // Record the number of elements currently on the mark stack, so we 2916 // only iterate over these. (Since evacuation may add to the mark 2917 // stack, doing more exposes race conditions.) If no mark is in 2918 // progress, this will be zero. 
2919 _cm->set_oops_do_bound(); 2920 2921 assert(regions_accounted_for(), "Region leakage."); 2922 2923 if (mark_in_progress()) 2924 concurrent_mark()->newCSet(); 2925 2926 #if YOUNG_LIST_VERBOSE 2927 gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:"); 2928 _young_list->print(); 2929 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); 2930 #endif // YOUNG_LIST_VERBOSE 2931 2932 g1_policy()->choose_collection_set(target_pause_time_ms); 2933 2934 // Nothing to do if we were unable to choose a collection set. 2935 #if G1_REM_SET_LOGGING 2936 gclog_or_tty->print_cr("\nAfter pause, heap:"); 2937 print(); 2938 #endif 2939 PrepareForRSScanningClosure prepare_for_rs_scan; 2940 collection_set_iterate(&prepare_for_rs_scan); 2941 2942 setup_surviving_young_words(); 2943 2944 // Set up the gc allocation regions. 2945 get_gc_alloc_regions(); 2946 2947 // Actually do the work... 2948 evacuate_collection_set(); 2949 2950 free_collection_set(g1_policy()->collection_set()); 2951 g1_policy()->clear_collection_set(); 2952 2953 cleanup_surviving_young_words(); 2954 2955 // Start a new incremental collection set for the next pause. 2956 g1_policy()->start_incremental_cset_building(); 2957 2958 // Clear the _cset_fast_test bitmap in anticipation of adding 2959 // regions to the incremental collection set for the next 2960 // evacuation pause. 2961 clear_cset_fast_test(); 2962 2963 if (g1_policy()->in_young_gc_mode()) { 2964 _young_list->reset_sampled_info(); 2965 2966 // Don't check the whole heap at this point as the 2967 // GC alloc regions from this pause have been tagged 2968 // as survivors and moved on to the survivor list. 2969 // Survivor regions will fail the !is_young() check. 2970 assert(check_young_list_empty(false /* check_heap */), 2971 "young list should be empty"); 2972 2973 #if YOUNG_LIST_VERBOSE 2974 gclog_or_tty->print_cr("Before recording survivors.\nYoung List:"); 2975 _young_list->print(); 2976 #endif // YOUNG_LIST_VERBOSE 2977 2978 g1_policy()->record_survivor_regions(_young_list->survivor_length(), 2979 _young_list->first_survivor_region(), 2980 _young_list->last_survivor_region()); 2981 2982 _young_list->reset_auxilary_lists(); 2983 } 2984 2985 if (evacuation_failed()) { 2986 _summary_bytes_used = recalculate_used(); 2987 } else { 2988 // The "used" of the the collection set have already been subtracted 2989 // when they were freed. Add in the bytes evacuated. 2990 _summary_bytes_used += g1_policy()->bytes_in_to_space(); 2991 } 2992 2993 if (g1_policy()->in_young_gc_mode() && 2994 g1_policy()->during_initial_mark_pause()) { 2995 concurrent_mark()->checkpointRootsInitialPost(); 2996 set_marking_started(); 2997 // CAUTION: after the doConcurrentMark() call below, 2998 // the concurrent marking thread(s) could be running 2999 // concurrently with us. Make sure that anything after 3000 // this point does not assume that we are the only GC thread 3001 // running. Note: of course, the actual marking work will 3002 // not start until the safepoint itself is released in 3003 // ConcurrentGCThread::safepoint_desynchronize(). 
3004 doConcurrentMark(); 3005 } 3006 3007 #if YOUNG_LIST_VERBOSE 3008 gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:"); 3009 _young_list->print(); 3010 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); 3011 #endif // YOUNG_LIST_VERBOSE 3012 3013 double end_time_sec = os::elapsedTime(); 3014 double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS; 3015 g1_policy()->record_pause_time_ms(pause_time_ms); 3016 g1_policy()->record_collection_pause_end(); 3017 3018 assert(regions_accounted_for(), "Region leakage."); 3019 3020 MemoryService::track_memory_usage(); 3021 3022 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) { 3023 HandleMark hm; // Discard invalid handles created during verification 3024 gclog_or_tty->print(" VerifyAfterGC:"); 3025 prepare_for_verify(); 3026 Universe::verify(false); 3027 } 3028 3029 if (was_enabled) ref_processor()->enable_discovery(); 3030 3031 { 3032 size_t expand_bytes = g1_policy()->expansion_amount(); 3033 if (expand_bytes > 0) { 3034 size_t bytes_before = capacity(); 3035 expand(expand_bytes); 3036 } 3037 } 3038 3039 if (mark_in_progress()) { 3040 concurrent_mark()->update_g1_committed(); 3041 } 3042 3043 #ifdef TRACESPINNING 3044 ParallelTaskTerminator::print_termination_counts(); 3045 #endif 3046 3047 gc_epilogue(false); 3048 } 3049 3050 assert(verify_region_lists(), "Bad region lists."); 3051 3052 if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) { 3053 gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum); 3054 print_tracing_info(); 3055 vm_exit(-1); 3056 } 3057 } 3058 3059 TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats()); 3060 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats()); 3061 3062 if (PrintHeapAtGC) { 3063 Universe::print_heap_after_gc(); 3064 } 3065 if (G1SummarizeRSetStats && 3066 (G1SummarizeRSetStatsPeriod > 0) && 3067 (total_collections() % G1SummarizeRSetStatsPeriod == 0)) { 3068 g1_rem_set()->print_summary_info(); 3069 } 3070 } 3071 3072 size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose) 3073 { 3074 size_t gclab_word_size; 3075 switch (purpose) { 3076 case GCAllocForSurvived: 3077 gclab_word_size = YoungPLABSize; 3078 break; 3079 case GCAllocForTenured: 3080 gclab_word_size = OldPLABSize; 3081 break; 3082 default: 3083 assert(false, "unknown GCAllocPurpose"); 3084 gclab_word_size = OldPLABSize; 3085 break; 3086 } 3087 return gclab_word_size; 3088 } 3089 3090 3091 void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) { 3092 assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose"); 3093 // make sure we don't call set_gc_alloc_region() multiple times on 3094 // the same region 3095 assert(r == NULL || !r->is_gc_alloc_region(), 3096 "shouldn't already be a GC alloc region"); 3097 assert(r == NULL || !r->isHumongous(), 3098 "humongous regions shouldn't be used as GC alloc regions"); 3099 3100 HeapWord* original_top = NULL; 3101 if (r != NULL) 3102 original_top = r->top(); 3103 3104 // We will want to record the used space in r as being there before gc. 3105 // One we install it as a GC alloc region it's eligible for allocation. 3106 // So record it now and use it later. 3107 size_t r_used = 0; 3108 if (r != NULL) { 3109 r_used = r->used(); 3110 3111 if (G1CollectedHeap::use_parallel_gc_threads()) { 3112 // need to take the lock to guard against two threads calling 3113 // get_gc_alloc_region concurrently (very unlikely but...) 
3114 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); 3115 r->save_marks(); 3116 } 3117 } 3118 HeapRegion* old_alloc_region = _gc_alloc_regions[purpose]; 3119 _gc_alloc_regions[purpose] = r; 3120 if (old_alloc_region != NULL) { 3121 // Replace aliases too. 3122 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { 3123 if (_gc_alloc_regions[ap] == old_alloc_region) { 3124 _gc_alloc_regions[ap] = r; 3125 } 3126 } 3127 } 3128 if (r != NULL) { 3129 push_gc_alloc_region(r); 3130 if (mark_in_progress() && original_top != r->next_top_at_mark_start()) { 3131 // We are using a region as a GC alloc region after it has been used 3132 // as a mutator allocation region during the current marking cycle. 3133 // The mutator-allocated objects are currently implicitly marked, but 3134 // when we move hr->next_top_at_mark_start() forward at the the end 3135 // of the GC pause, they won't be. We therefore mark all objects in 3136 // the "gap". We do this object-by-object, since marking densely 3137 // does not currently work right with marking bitmap iteration. This 3138 // means we rely on TLAB filling at the start of pauses, and no 3139 // "resuscitation" of filled TLAB's. If we want to do this, we need 3140 // to fix the marking bitmap iteration. 3141 HeapWord* curhw = r->next_top_at_mark_start(); 3142 HeapWord* t = original_top; 3143 3144 while (curhw < t) { 3145 oop cur = (oop)curhw; 3146 // We'll assume parallel for generality. This is rare code. 3147 concurrent_mark()->markAndGrayObjectIfNecessary(cur); // can't we just mark them? 3148 curhw = curhw + cur->size(); 3149 } 3150 assert(curhw == t, "Should have parsed correctly."); 3151 } 3152 if (G1PolicyVerbose > 1) { 3153 gclog_or_tty->print("New alloc region ["PTR_FORMAT", "PTR_FORMAT", " PTR_FORMAT") " 3154 "for survivors:", r->bottom(), original_top, r->end()); 3155 r->print(); 3156 } 3157 g1_policy()->record_before_bytes(r_used); 3158 } 3159 } 3160 3161 void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) { 3162 assert(Thread::current()->is_VM_thread() || 3163 par_alloc_during_gc_lock()->owned_by_self(), "Precondition"); 3164 assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(), 3165 "Precondition."); 3166 hr->set_is_gc_alloc_region(true); 3167 hr->set_next_gc_alloc_region(_gc_alloc_region_list); 3168 _gc_alloc_region_list = hr; 3169 } 3170 3171 #ifdef G1_DEBUG 3172 class FindGCAllocRegion: public HeapRegionClosure { 3173 public: 3174 bool doHeapRegion(HeapRegion* r) { 3175 if (r->is_gc_alloc_region()) { 3176 gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.", 3177 r->hrs_index(), r->bottom()); 3178 } 3179 return false; 3180 } 3181 }; 3182 #endif // G1_DEBUG 3183 3184 void G1CollectedHeap::forget_alloc_region_list() { 3185 assert(Thread::current()->is_VM_thread(), "Precondition"); 3186 while (_gc_alloc_region_list != NULL) { 3187 HeapRegion* r = _gc_alloc_region_list; 3188 assert(r->is_gc_alloc_region(), "Invariant."); 3189 // We need HeapRegion::oops_on_card_seq_iterate_careful() to work on 3190 // newly allocated data in order to be able to apply deferred updates 3191 // before the GC is done for verification purposes (i.e to allow 3192 // G1HRRSFlushLogBuffersOnVerify). It's safe thing to do after the 3193 // collection. 
3194 r->ContiguousSpace::set_saved_mark(); 3195 _gc_alloc_region_list = r->next_gc_alloc_region(); 3196 r->set_next_gc_alloc_region(NULL); 3197 r->set_is_gc_alloc_region(false); 3198 if (r->is_survivor()) { 3199 if (r->is_empty()) { 3200 r->set_not_young(); 3201 } else { 3202 _young_list->add_survivor_region(r); 3203 } 3204 } 3205 if (r->is_empty()) { 3206 ++_free_regions; 3207 } 3208 } 3209 #ifdef G1_DEBUG 3210 FindGCAllocRegion fa; 3211 heap_region_iterate(&fa); 3212 #endif // G1_DEBUG 3213 } 3214 3215 3216 bool G1CollectedHeap::check_gc_alloc_regions() { 3217 // TODO: allocation regions check 3218 return true; 3219 } 3220 3221 void G1CollectedHeap::get_gc_alloc_regions() { 3222 // First, let's check that the GC alloc region list is empty (it should) 3223 assert(_gc_alloc_region_list == NULL, "invariant"); 3224 3225 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { 3226 assert(_gc_alloc_regions[ap] == NULL, "invariant"); 3227 assert(_gc_alloc_region_counts[ap] == 0, "invariant"); 3228 3229 // Create new GC alloc regions. 3230 HeapRegion* alloc_region = _retained_gc_alloc_regions[ap]; 3231 _retained_gc_alloc_regions[ap] = NULL; 3232 3233 if (alloc_region != NULL) { 3234 assert(_retain_gc_alloc_region[ap], "only way to retain a GC region"); 3235 3236 // let's make sure that the GC alloc region is not tagged as such 3237 // outside a GC operation 3238 assert(!alloc_region->is_gc_alloc_region(), "sanity"); 3239 3240 if (alloc_region->in_collection_set() || 3241 alloc_region->top() == alloc_region->end() || 3242 alloc_region->top() == alloc_region->bottom() || 3243 alloc_region->isHumongous()) { 3244 // we will discard the current GC alloc region if 3245 // * it's in the collection set (it can happen!), 3246 // * it's already full (no point in using it), 3247 // * it's empty (this means that it was emptied during 3248 // a cleanup and it should be on the free list now), or 3249 // * it's humongous (this means that it was emptied 3250 // during a cleanup and was added to the free list, but 3251 // has been subseqently used to allocate a humongous 3252 // object that may be less than the region size). 3253 3254 alloc_region = NULL; 3255 } 3256 } 3257 3258 if (alloc_region == NULL) { 3259 // we will get a new GC alloc region 3260 alloc_region = newAllocRegionWithExpansion(ap, 0); 3261 } else { 3262 // the region was retained from the last collection 3263 ++_gc_alloc_region_counts[ap]; 3264 if (G1PrintHeapRegions) { 3265 gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], " 3266 "top "PTR_FORMAT, 3267 alloc_region->hrs_index(), alloc_region->bottom(), alloc_region->end(), alloc_region->top()); 3268 } 3269 } 3270 3271 if (alloc_region != NULL) { 3272 assert(_gc_alloc_regions[ap] == NULL, "pre-condition"); 3273 set_gc_alloc_region(ap, alloc_region); 3274 } 3275 3276 assert(_gc_alloc_regions[ap] == NULL || 3277 _gc_alloc_regions[ap]->is_gc_alloc_region(), 3278 "the GC alloc region should be tagged as such"); 3279 assert(_gc_alloc_regions[ap] == NULL || 3280 _gc_alloc_regions[ap] == _gc_alloc_region_list, 3281 "the GC alloc region should be the same as the GC alloc list head"); 3282 } 3283 // Set alternative regions for allocation purposes that have reached 3284 // their limit. 
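// For instance (illustrative only): if no region could be obtained for one
// purpose (say GCAllocForTenured), requests for that purpose are redirected
// to the region of whatever purpose g1_policy()->alternative_purpose(ap)
// names, by aliasing the corresponding _gc_alloc_regions[] slot below.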
3285 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { 3286 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap); 3287 if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) { 3288 _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose]; 3289 } 3290 } 3291 assert(check_gc_alloc_regions(), "alloc regions messed up"); 3292 } 3293 3294 void G1CollectedHeap::release_gc_alloc_regions(bool totally) { 3295 // We keep a separate list of all regions that have been alloc regions in 3296 // the current collection pause. Forget that now. This method will 3297 // untag the GC alloc regions and tear down the GC alloc region 3298 // list. It's desirable that no regions are tagged as GC alloc 3299 // outside GCs. 3300 forget_alloc_region_list(); 3301 3302 // The current alloc regions contain objs that have survived 3303 // collection. Make them no longer GC alloc regions. 3304 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { 3305 HeapRegion* r = _gc_alloc_regions[ap]; 3306 _retained_gc_alloc_regions[ap] = NULL; 3307 _gc_alloc_region_counts[ap] = 0; 3308 3309 if (r != NULL) { 3310 // we retain nothing on _gc_alloc_regions between GCs 3311 set_gc_alloc_region(ap, NULL); 3312 3313 if (r->is_empty()) { 3314 // we didn't actually allocate anything in it; let's just put 3315 // it on the free list 3316 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); 3317 r->set_zero_fill_complete(); 3318 put_free_region_on_list_locked(r); 3319 } else if (_retain_gc_alloc_region[ap] && !totally) { 3320 // retain it so that we can use it at the beginning of the next GC 3321 _retained_gc_alloc_regions[ap] = r; 3322 } 3323 } 3324 } 3325 } 3326 3327 #ifndef PRODUCT 3328 // Useful for debugging 3329 3330 void G1CollectedHeap::print_gc_alloc_regions() { 3331 gclog_or_tty->print_cr("GC alloc regions"); 3332 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { 3333 HeapRegion* r = _gc_alloc_regions[ap]; 3334 if (r == NULL) { 3335 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT, ap, NULL); 3336 } else { 3337 gclog_or_tty->print_cr(" %2d : "PTR_FORMAT" "SIZE_FORMAT, 3338 ap, r->bottom(), r->used()); 3339 } 3340 } 3341 } 3342 #endif // PRODUCT 3343 3344 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) { 3345 _drain_in_progress = false; 3346 set_evac_failure_closure(cl); 3347 _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); 3348 } 3349 3350 void G1CollectedHeap::finalize_for_evac_failure() { 3351 assert(_evac_failure_scan_stack != NULL && 3352 _evac_failure_scan_stack->length() == 0, 3353 "Postcondition"); 3354 assert(!_drain_in_progress, "Postcondition"); 3355 delete _evac_failure_scan_stack; 3356 _evac_failure_scan_stack = NULL; 3357 } 3358 3359 3360 3361 // *** Sequential G1 Evacuation 3362 3363 HeapWord* G1CollectedHeap::allocate_during_gc(GCAllocPurpose purpose, size_t word_size) { 3364 HeapRegion* alloc_region = _gc_alloc_regions[purpose]; 3365 // let the caller handle alloc failure 3366 if (alloc_region == NULL) return NULL; 3367 assert(isHumongous(word_size) || !alloc_region->isHumongous(), 3368 "Either the object is humongous or the region isn't"); 3369 HeapWord* block = alloc_region->allocate(word_size); 3370 if (block == NULL) { 3371 block = allocate_during_gc_slow(purpose, alloc_region, false, word_size); 3372 } 3373 return block; 3374 } 3375 3376 class G1IsAliveClosure: public BoolObjectClosure { 3377 G1CollectedHeap* _g1; 3378 public: 3379 G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} 3380 void do_object(oop p) { assert(false, "Do not call."); } 
3381 bool do_object_b(oop p) { 3382 // It is reachable if it is outside the collection set, or is inside 3383 // and forwarded. 3384 3385 #ifdef G1_DEBUG 3386 gclog_or_tty->print_cr("is alive "PTR_FORMAT" in CS %d forwarded %d overall %d", 3387 (void*) p, _g1->obj_in_cs(p), p->is_forwarded(), 3388 !_g1->obj_in_cs(p) || p->is_forwarded()); 3389 #endif // G1_DEBUG 3390 3391 return !_g1->obj_in_cs(p) || p->is_forwarded(); 3392 } 3393 }; 3394 3395 class G1KeepAliveClosure: public OopClosure { 3396 G1CollectedHeap* _g1; 3397 public: 3398 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} 3399 void do_oop(narrowOop* p) { guarantee(false, "Not needed"); } 3400 void do_oop( oop* p) { 3401 oop obj = *p; 3402 #ifdef G1_DEBUG 3403 if (PrintGC && Verbose) { 3404 gclog_or_tty->print_cr("keep alive *"PTR_FORMAT" = "PTR_FORMAT" "PTR_FORMAT, 3405 p, (void*) obj, (void*) *p); 3406 } 3407 #endif // G1_DEBUG 3408 3409 if (_g1->obj_in_cs(obj)) { 3410 assert( obj->is_forwarded(), "invariant" ); 3411 *p = obj->forwardee(); 3412 #ifdef G1_DEBUG 3413 gclog_or_tty->print_cr(" in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT, 3414 (void*) obj, (void*) *p); 3415 #endif // G1_DEBUG 3416 } 3417 } 3418 }; 3419 3420 class UpdateRSetDeferred : public OopsInHeapRegionClosure { 3421 private: 3422 G1CollectedHeap* _g1; 3423 DirtyCardQueue *_dcq; 3424 CardTableModRefBS* _ct_bs; 3425 3426 public: 3427 UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) : 3428 _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {} 3429 3430 virtual void do_oop(narrowOop* p) { do_oop_work(p); } 3431 virtual void do_oop( oop* p) { do_oop_work(p); } 3432 template <class T> void do_oop_work(T* p) { 3433 assert(_from->is_in_reserved(p), "paranoia"); 3434 if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && 3435 !_from->is_survivor()) { 3436 size_t card_index = _ct_bs->index_for(p); 3437 if (_ct_bs->mark_card_deferred(card_index)) { 3438 _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index)); 3439 } 3440 } 3441 } 3442 }; 3443 3444 class RemoveSelfPointerClosure: public ObjectClosure { 3445 private: 3446 G1CollectedHeap* _g1; 3447 ConcurrentMark* _cm; 3448 HeapRegion* _hr; 3449 size_t _prev_marked_bytes; 3450 size_t _next_marked_bytes; 3451 OopsInHeapRegionClosure *_cl; 3452 public: 3453 RemoveSelfPointerClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* cl) : 3454 _g1(g1), _cm(_g1->concurrent_mark()), _prev_marked_bytes(0), 3455 _next_marked_bytes(0), _cl(cl) {} 3456 3457 size_t prev_marked_bytes() { return _prev_marked_bytes; } 3458 size_t next_marked_bytes() { return _next_marked_bytes; } 3459 3460 // The original idea here was to coalesce evacuated and dead objects. 3461 // However that caused complications with the block offset table (BOT). 3462 // In particular if there were two TLABs, one of them partially refined. 3463 // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~| 3464 // The BOT entries of the unrefined part of TLAB_2 point to the start 3465 // of TLAB_2. If the last object of the TLAB_1 and the first object 3466 // of TLAB_2 are coalesced, then the cards of the unrefined part 3467 // would point into middle of the filler object. 3468 // 3469 // The current approach is to not coalesce and leave the BOT contents intact. 3470 void do_object(oop obj) { 3471 if (obj->is_forwarded() && obj->forwardee() == obj) { 3472 // The object failed to move. 
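// (A self-forwarding pointer is how evacuation failure is recorded for an
// object: it stays where it is, is marked in the "prev" bitmap below, and
// its fields are re-scanned so that remembered-set entries skipped during
// the pause can be recreated.)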
3473 assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs.");
3474 _cm->markPrev(obj);
3475 assert(_cm->isPrevMarked(obj), "Should be marked!");
3476 _prev_marked_bytes += (obj->size() * HeapWordSize);
3477 if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) {
3478 _cm->markAndGrayObjectIfNecessary(obj);
3479 }
3480 obj->set_mark(markOopDesc::prototype());
3481 // While we were processing RSet buffers during the
3482 // collection, we actually didn't scan any cards on the
3483 // collection set, since we didn't want to update remembered
3484 // sets with entries that point into the collection set, given
3485 // that live objects from the collection set are about to move
3486 // and such entries will be stale very soon. This change also
3487 // dealt with a reliability issue which involved scanning a
3488 // card in the collection set and coming across an array that
3489 // was being chunked and looking malformed. The problem is
3490 // that, if evacuation fails, we might have remembered set
3491 // entries missing given that we skipped cards on the
3492 // collection set. So, we'll recreate such entries now.
3493 obj->oop_iterate(_cl);
3494 assert(_cm->isPrevMarked(obj), "Should be marked!");
3495 } else {
3496 // The object has been either evacuated or is dead. Fill it with a
3497 // dummy object.
3498 MemRegion mr((HeapWord*)obj, obj->size());
3499 CollectedHeap::fill_with_object(mr);
3500 _cm->clearRangeBothMaps(mr);
3501 }
3502 }
3503 };
3504 
3505 void G1CollectedHeap::remove_self_forwarding_pointers() {
3506 UpdateRSetImmediate immediate_update(_g1h->g1_rem_set());
3507 DirtyCardQueue dcq(&_g1h->dirty_card_queue_set());
3508 UpdateRSetDeferred deferred_update(_g1h, &dcq);
3509 OopsInHeapRegionClosure *cl;
3510 if (G1DeferredRSUpdate) {
3511 cl = &deferred_update;
3512 } else {
3513 cl = &immediate_update;
3514 }
3515 HeapRegion* cur = g1_policy()->collection_set();
3516 while (cur != NULL) {
3517 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
3518 
3519 RemoveSelfPointerClosure rspc(_g1h, cl);
3520 if (cur->evacuation_failed()) {
3521 assert(cur->in_collection_set(), "bad CS");
3522 cl->set_region(cur);
3523 cur->object_iterate(&rspc);
3524 
3525 // A number of manipulations to make the TAMS be the current top,
3526 // and the marked bytes be the ones observed in the iteration.
3527 if (_g1h->concurrent_mark()->at_least_one_mark_complete()) {
3528 // The comments below are the postconditions achieved by the
3529 // calls. Note especially the last such condition, which says that
3530 // the count of marked bytes has been properly restored.
3531 cur->note_start_of_marking(false);
3532 // _next_top_at_mark_start == top, _next_marked_bytes == 0
3533 cur->add_to_marked_bytes(rspc.prev_marked_bytes());
3534 // _next_marked_bytes == prev_marked_bytes.
3535 cur->note_end_of_marking();
3536 // _prev_top_at_mark_start == top(),
3537 // _prev_marked_bytes == prev_marked_bytes
3538 }
3539 // If there is no mark in progress, we modified the _next variables
3540 // above needlessly, but harmlessly.
3541 if (_g1h->mark_in_progress()) {
3542 cur->note_start_of_marking(false);
3543 // _next_top_at_mark_start == top, _next_marked_bytes == 0
3544 // _next_marked_bytes == next_marked_bytes.
3545 }
3546 
3547 // Now make sure the region has the right index in the sorted array.
3548 g1_policy()->note_change_in_marked_bytes(cur); 3549 } 3550 cur = cur->next_in_collection_set(); 3551 } 3552 assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!"); 3553 3554 // Now restore saved marks, if any. 3555 if (_objs_with_preserved_marks != NULL) { 3556 assert(_preserved_marks_of_objs != NULL, "Both or none."); 3557 assert(_objs_with_preserved_marks->length() == 3558 _preserved_marks_of_objs->length(), "Both or none."); 3559 guarantee(_objs_with_preserved_marks->length() == 3560 _preserved_marks_of_objs->length(), "Both or none."); 3561 for (int i = 0; i < _objs_with_preserved_marks->length(); i++) { 3562 oop obj = _objs_with_preserved_marks->at(i); 3563 markOop m = _preserved_marks_of_objs->at(i); 3564 obj->set_mark(m); 3565 } 3566 // Delete the preserved marks growable arrays (allocated on the C heap). 3567 delete _objs_with_preserved_marks; 3568 delete _preserved_marks_of_objs; 3569 _objs_with_preserved_marks = NULL; 3570 _preserved_marks_of_objs = NULL; 3571 } 3572 } 3573 3574 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) { 3575 _evac_failure_scan_stack->push(obj); 3576 } 3577 3578 void G1CollectedHeap::drain_evac_failure_scan_stack() { 3579 assert(_evac_failure_scan_stack != NULL, "precondition"); 3580 3581 while (_evac_failure_scan_stack->length() > 0) { 3582 oop obj = _evac_failure_scan_stack->pop(); 3583 _evac_failure_closure->set_region(heap_region_containing(obj)); 3584 obj->oop_iterate_backwards(_evac_failure_closure); 3585 } 3586 } 3587 3588 void G1CollectedHeap::handle_evacuation_failure(oop old) { 3589 markOop m = old->mark(); 3590 // forward to self 3591 assert(!old->is_forwarded(), "precondition"); 3592 3593 old->forward_to(old); 3594 handle_evacuation_failure_common(old, m); 3595 } 3596 3597 oop 3598 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, 3599 oop old) { 3600 markOop m = old->mark(); 3601 oop forward_ptr = old->forward_to_atomic(old); 3602 if (forward_ptr == NULL) { 3603 // Forward-to-self succeeded. 3604 if (_evac_failure_closure != cl) { 3605 MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag); 3606 assert(!_drain_in_progress, 3607 "Should only be true while someone holds the lock."); 3608 // Set the global evac-failure closure to the current thread's. 3609 assert(_evac_failure_closure == NULL, "Or locking has failed."); 3610 set_evac_failure_closure(cl); 3611 // Now do the common part. 3612 handle_evacuation_failure_common(old, m); 3613 // Reset to NULL. 3614 set_evac_failure_closure(NULL); 3615 } else { 3616 // The lock is already held, and this is recursive. 3617 assert(_drain_in_progress, "This should only be the recursive case."); 3618 handle_evacuation_failure_common(old, m); 3619 } 3620 return old; 3621 } else { 3622 // Someone else had a place to copy it. 
3623 return forward_ptr; 3624 } 3625 } 3626 3627 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) { 3628 set_evacuation_failed(true); 3629 3630 preserve_mark_if_necessary(old, m); 3631 3632 HeapRegion* r = heap_region_containing(old); 3633 if (!r->evacuation_failed()) { 3634 r->set_evacuation_failed(true); 3635 if (G1PrintHeapRegions) { 3636 gclog_or_tty->print("overflow in heap region "PTR_FORMAT" " 3637 "["PTR_FORMAT","PTR_FORMAT")\n", 3638 r, r->bottom(), r->end()); 3639 } 3640 } 3641 3642 push_on_evac_failure_scan_stack(old); 3643 3644 if (!_drain_in_progress) { 3645 // prevent recursion in copy_to_survivor_space() 3646 _drain_in_progress = true; 3647 drain_evac_failure_scan_stack(); 3648 _drain_in_progress = false; 3649 } 3650 } 3651 3652 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) { 3653 if (m != markOopDesc::prototype()) { 3654 if (_objs_with_preserved_marks == NULL) { 3655 assert(_preserved_marks_of_objs == NULL, "Both or none."); 3656 _objs_with_preserved_marks = 3657 new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); 3658 _preserved_marks_of_objs = 3659 new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true); 3660 } 3661 _objs_with_preserved_marks->push(obj); 3662 _preserved_marks_of_objs->push(m); 3663 } 3664 } 3665 3666 // *** Parallel G1 Evacuation 3667 3668 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose, 3669 size_t word_size) { 3670 assert(!isHumongous(word_size), 3671 err_msg("we should not be seeing humongous allocation requests " 3672 "during GC, word_size = "SIZE_FORMAT, word_size)); 3673 3674 HeapRegion* alloc_region = _gc_alloc_regions[purpose]; 3675 // let the caller handle alloc failure 3676 if (alloc_region == NULL) return NULL; 3677 3678 HeapWord* block = alloc_region->par_allocate(word_size); 3679 if (block == NULL) { 3680 MutexLockerEx x(par_alloc_during_gc_lock(), 3681 Mutex::_no_safepoint_check_flag); 3682 block = allocate_during_gc_slow(purpose, alloc_region, true, word_size); 3683 } 3684 return block; 3685 } 3686 3687 void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region, 3688 bool par) { 3689 // Another thread might have obtained alloc_region for the given 3690 // purpose, and might be attempting to allocate in it, and might 3691 // succeed. Therefore, we can't do the "finalization" stuff on the 3692 // region below until we're sure the last allocation has happened. 3693 // We ensure this by allocating the remaining space with a garbage 3694 // object. 3695 if (par) par_allocate_remaining_space(alloc_region); 3696 // Now we can do the post-GC stuff on the region. 3697 alloc_region->note_end_of_copying(); 3698 g1_policy()->record_after_bytes(alloc_region->used()); 3699 } 3700 3701 HeapWord* 3702 G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose, 3703 HeapRegion* alloc_region, 3704 bool par, 3705 size_t word_size) { 3706 assert(!isHumongous(word_size), 3707 err_msg("we should not be seeing humongous allocation requests " 3708 "during GC, word_size = "SIZE_FORMAT, word_size)); 3709 3710 HeapWord* block = NULL; 3711 // In the parallel case, a previous thread to obtain the lock may have 3712 // already assigned a new gc_alloc_region. 
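// If so, try the newly-installed region first; only if it cannot satisfy
// the request either do we fall through and retire a region below.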
3713 if (alloc_region != _gc_alloc_regions[purpose]) { 3714 assert(par, "But should only happen in parallel case."); 3715 alloc_region = _gc_alloc_regions[purpose]; 3716 if (alloc_region == NULL) return NULL; 3717 block = alloc_region->par_allocate(word_size); 3718 if (block != NULL) return block; 3719 // Otherwise, continue; this new region is empty, too. 3720 } 3721 assert(alloc_region != NULL, "We better have an allocation region"); 3722 retire_alloc_region(alloc_region, par); 3723 3724 if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) { 3725 // Cannot allocate more regions for the given purpose. 3726 GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose); 3727 // Is there an alternative? 3728 if (purpose != alt_purpose) { 3729 HeapRegion* alt_region = _gc_alloc_regions[alt_purpose]; 3730 // Has not the alternative region been aliased? 3731 if (alloc_region != alt_region && alt_region != NULL) { 3732 // Try to allocate in the alternative region. 3733 if (par) { 3734 block = alt_region->par_allocate(word_size); 3735 } else { 3736 block = alt_region->allocate(word_size); 3737 } 3738 // Make an alias. 3739 _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose]; 3740 if (block != NULL) { 3741 return block; 3742 } 3743 retire_alloc_region(alt_region, par); 3744 } 3745 // Both the allocation region and the alternative one are full 3746 // and aliased, replace them with a new allocation region. 3747 purpose = alt_purpose; 3748 } else { 3749 set_gc_alloc_region(purpose, NULL); 3750 return NULL; 3751 } 3752 } 3753 3754 // Now allocate a new region for allocation. 3755 alloc_region = newAllocRegionWithExpansion(purpose, word_size, false /*zero_filled*/); 3756 3757 // let the caller handle alloc failure 3758 if (alloc_region != NULL) { 3759 3760 assert(check_gc_alloc_regions(), "alloc regions messed up"); 3761 assert(alloc_region->saved_mark_at_top(), 3762 "Mark should have been saved already."); 3763 // We used to assert that the region was zero-filled here, but no 3764 // longer. 3765 3766 // This must be done last: once it's installed, other regions may 3767 // allocate in it (without holding the lock.) 3768 set_gc_alloc_region(purpose, alloc_region); 3769 3770 if (par) { 3771 block = alloc_region->par_allocate(word_size); 3772 } else { 3773 block = alloc_region->allocate(word_size); 3774 } 3775 // Caller handles alloc failure. 3776 } else { 3777 // This sets other apis using the same old alloc region to NULL, also. 3778 set_gc_alloc_region(purpose, NULL); 3779 } 3780 return block; // May be NULL. 3781 } 3782 3783 void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) { 3784 HeapWord* block = NULL; 3785 size_t free_words; 3786 do { 3787 free_words = r->free()/HeapWordSize; 3788 // If there's too little space, no one can allocate, so we're done. 3789 if (free_words < CollectedHeap::min_fill_size()) return; 3790 // Otherwise, try to claim it. 
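// par_allocate() may fail if another thread claims part of the space
// first; in that case loop around, re-read the (now smaller) free size
// and try again until we either claim the remainder or it drops below
// the minimum fill size.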
3791 block = r->par_allocate(free_words); 3792 } while (block == NULL); 3793 fill_with_object(block, free_words); 3794 } 3795 3796 #ifndef PRODUCT 3797 bool GCLabBitMapClosure::do_bit(size_t offset) { 3798 HeapWord* addr = _bitmap->offsetToHeapWord(offset); 3799 guarantee(_cm->isMarked(oop(addr)), "it should be!"); 3800 return true; 3801 } 3802 #endif // PRODUCT 3803 3804 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num) 3805 : _g1h(g1h), 3806 _refs(g1h->task_queue(queue_num)), 3807 _dcq(&g1h->dirty_card_queue_set()), 3808 _ct_bs((CardTableModRefBS*)_g1h->barrier_set()), 3809 _g1_rem(g1h->g1_rem_set()), 3810 _hash_seed(17), _queue_num(queue_num), 3811 _term_attempts(0), 3812 _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)), 3813 _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)), 3814 _age_table(false), 3815 _strong_roots_time(0), _term_time(0), 3816 _alloc_buffer_waste(0), _undo_waste(0) 3817 { 3818 // we allocate G1YoungSurvRateNumRegions plus one entries, since 3819 // we "sacrifice" entry 0 to keep track of surviving bytes for 3820 // non-young regions (where the age is -1) 3821 // We also add a few elements at the beginning and at the end in 3822 // an attempt to eliminate cache contention 3823 size_t real_length = 1 + _g1h->g1_policy()->young_cset_length(); 3824 size_t array_length = PADDING_ELEM_NUM + 3825 real_length + 3826 PADDING_ELEM_NUM; 3827 _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length); 3828 if (_surviving_young_words_base == NULL) 3829 vm_exit_out_of_memory(array_length * sizeof(size_t), 3830 "Not enough space for young surv histo."); 3831 _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM; 3832 memset(_surviving_young_words, 0, real_length * sizeof(size_t)); 3833 3834 _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer; 3835 _alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer; 3836 3837 _start = os::elapsedTime(); 3838 } 3839 3840 void 3841 G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st) 3842 { 3843 st->print_raw_cr("GC Termination Stats"); 3844 st->print_raw_cr(" elapsed --strong roots-- -------termination-------" 3845 " ------waste (KiB)------"); 3846 st->print_raw_cr("thr ms ms % ms % attempts" 3847 " total alloc undo"); 3848 st->print_raw_cr("--- --------- --------- ------ --------- ------ --------" 3849 " ------- ------- -------"); 3850 } 3851 3852 void 3853 G1ParScanThreadState::print_termination_stats(int i, 3854 outputStream* const st) const 3855 { 3856 const double elapsed_ms = elapsed_time() * 1000.0; 3857 const double s_roots_ms = strong_roots_time() * 1000.0; 3858 const double term_ms = term_time() * 1000.0; 3859 st->print_cr("%3d %9.2f %9.2f %6.2f " 3860 "%9.2f %6.2f " SIZE_FORMAT_W(8) " " 3861 SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7), 3862 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms, 3863 term_ms, term_ms * 100 / elapsed_ms, term_attempts(), 3864 (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K, 3865 alloc_buffer_waste() * HeapWordSize / K, 3866 undo_waste() * HeapWordSize / K); 3867 } 3868 3869 #ifdef ASSERT 3870 bool G1ParScanThreadState::verify_ref(narrowOop* ref) const { 3871 assert(ref != NULL, "invariant"); 3872 assert(UseCompressedOops, "sanity"); 3873 assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref)); 3874 oop p = oopDesc::load_decode_heap_oop(ref); 3875 assert(_g1h->is_in_g1_reserved(p), 3876 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, 
intptr_t(p))); 3877 return true; 3878 } 3879 3880 bool G1ParScanThreadState::verify_ref(oop* ref) const { 3881 assert(ref != NULL, "invariant"); 3882 if (has_partial_array_mask(ref)) { 3883 // Must be in the collection set--it's already been copied. 3884 oop p = clear_partial_array_mask(ref); 3885 assert(_g1h->obj_in_cs(p), 3886 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p))); 3887 } else { 3888 oop p = oopDesc::load_decode_heap_oop(ref); 3889 assert(_g1h->is_in_g1_reserved(p), 3890 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p))); 3891 } 3892 return true; 3893 } 3894 3895 bool G1ParScanThreadState::verify_task(StarTask ref) const { 3896 if (ref.is_narrow()) { 3897 return verify_ref((narrowOop*) ref); 3898 } else { 3899 return verify_ref((oop*) ref); 3900 } 3901 } 3902 #endif // ASSERT 3903 3904 void G1ParScanThreadState::trim_queue() { 3905 StarTask ref; 3906 do { 3907 // Drain the overflow stack first, so other threads can steal. 3908 while (refs()->pop_overflow(ref)) { 3909 deal_with_reference(ref); 3910 } 3911 while (refs()->pop_local(ref)) { 3912 deal_with_reference(ref); 3913 } 3914 } while (!refs()->is_empty()); 3915 } 3916 3917 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) : 3918 _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()), 3919 _par_scan_state(par_scan_state) { } 3920 3921 template <class T> void G1ParCopyHelper::mark_forwardee(T* p) { 3922 // This is called _after_ do_oop_work has been called, hence after 3923 // the object has been relocated to its new location and *p points 3924 // to its new location. 3925 3926 T heap_oop = oopDesc::load_heap_oop(p); 3927 if (!oopDesc::is_null(heap_oop)) { 3928 oop obj = oopDesc::decode_heap_oop(heap_oop); 3929 assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj)), 3930 "shouldn't still be in the CSet if evacuation didn't fail."); 3931 HeapWord* addr = (HeapWord*)obj; 3932 if (_g1->is_in_g1_reserved(addr)) 3933 _cm->grayRoot(oop(addr)); 3934 } 3935 } 3936 3937 oop G1ParCopyHelper::copy_to_survivor_space(oop old) { 3938 size_t word_sz = old->size(); 3939 HeapRegion* from_region = _g1->heap_region_containing_raw(old); 3940 // +1 to make the -1 indexes valid... 3941 int young_index = from_region->young_index_in_cset()+1; 3942 assert( (from_region->is_young() && young_index > 0) || 3943 (!from_region->is_young() && young_index == 0), "invariant" ); 3944 G1CollectorPolicy* g1p = _g1->g1_policy(); 3945 markOop m = old->mark(); 3946 int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age() 3947 : m->age(); 3948 GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age, 3949 word_sz); 3950 HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz); 3951 oop obj = oop(obj_ptr); 3952 3953 if (obj_ptr == NULL) { 3954 // This will either forward-to-self, or detect that someone else has 3955 // installed a forwarding pointer. 3956 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure(); 3957 return _g1->handle_evacuation_failure_par(cl, old); 3958 } 3959 3960 // We're going to allocate linearly, so might as well prefetch ahead. 3961 Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes); 3962 3963 oop forward_ptr = old->forward_to_atomic(obj); 3964 if (forward_ptr == NULL) { 3965 Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz); 3966 if (g1p->track_object_age(alloc_purpose)) { 3967 // We could simply do obj->incr_age(). However, this causes a 3968 // performance issue. 
obj->incr_age() will first check whether 3969 // the object has a displaced mark by checking its mark word; 3970 // getting the mark word from the new location of the object 3971 // stalls. So, given that we already have the mark word and we 3972 // are about to install it anyway, it's better to increase the 3973 // age on the mark word, when the object does not have a 3974 // displaced mark word. We're not expecting many objects to have 3975 // a displaced marked word, so that case is not optimized 3976 // further (it could be...) and we simply call obj->incr_age(). 3977 3978 if (m->has_displaced_mark_helper()) { 3979 // in this case, we have to install the mark word first, 3980 // otherwise obj looks to be forwarded (the old mark word, 3981 // which contains the forward pointer, was copied) 3982 obj->set_mark(m); 3983 obj->incr_age(); 3984 } else { 3985 m = m->incr_age(); 3986 obj->set_mark(m); 3987 } 3988 _par_scan_state->age_table()->add(obj, word_sz); 3989 } else { 3990 obj->set_mark(m); 3991 } 3992 3993 // preserve "next" mark bit 3994 if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) { 3995 if (!use_local_bitmaps || 3996 !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) { 3997 // if we couldn't mark it on the local bitmap (this happens when 3998 // the object was not allocated in the GCLab), we have to bite 3999 // the bullet and do the standard parallel mark 4000 _cm->markAndGrayObjectIfNecessary(obj); 4001 } 4002 #if 1 4003 if (_g1->isMarkedNext(old)) { 4004 _cm->nextMarkBitMap()->parClear((HeapWord*)old); 4005 } 4006 #endif 4007 } 4008 4009 size_t* surv_young_words = _par_scan_state->surviving_young_words(); 4010 surv_young_words[young_index] += word_sz; 4011 4012 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) { 4013 arrayOop(old)->set_length(0); 4014 oop* old_p = set_partial_array_mask(old); 4015 _par_scan_state->push_on_queue(old_p); 4016 } else { 4017 // No point in using the slower heap_region_containing() method, 4018 // given that we know obj is in the heap. 4019 _scanner->set_region(_g1->heap_region_containing_raw(obj)); 4020 obj->oop_iterate_backwards(_scanner); 4021 } 4022 } else { 4023 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz); 4024 obj = forward_ptr; 4025 } 4026 return obj; 4027 } 4028 4029 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee> 4030 template <class T> 4031 void G1ParCopyClosure <do_gen_barrier, barrier, do_mark_forwardee> 4032 ::do_oop_work(T* p) { 4033 oop obj = oopDesc::load_decode_heap_oop(p); 4034 assert(barrier != G1BarrierRS || obj != NULL, 4035 "Precondition: G1BarrierRS implies obj is nonNull"); 4036 4037 // here the null check is implicit in the cset_fast_test() test 4038 if (_g1->in_cset_fast_test(obj)) { 4039 #if G1_REM_SET_LOGGING 4040 gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" " 4041 "into CS.", p, (void*) obj); 4042 #endif 4043 if (obj->is_forwarded()) { 4044 oopDesc::encode_store_heap_oop(p, obj->forwardee()); 4045 } else { 4046 oop copy_oop = copy_to_survivor_space(obj); 4047 oopDesc::encode_store_heap_oop(p, copy_oop); 4048 } 4049 // When scanning the RS, we only care about objs in CS. 
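// Note that "barrier" is a template parameter, so the checks below are
// resolved at compile time for each instantiation of this closure.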
4050 if (barrier == G1BarrierRS) { 4051 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); 4052 } 4053 } 4054 4055 if (barrier == G1BarrierEvac && obj != NULL) { 4056 _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num()); 4057 } 4058 4059 if (do_gen_barrier && obj != NULL) { 4060 par_do_barrier(p); 4061 } 4062 } 4063 4064 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p); 4065 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(narrowOop* p); 4066 4067 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) { 4068 assert(has_partial_array_mask(p), "invariant"); 4069 oop old = clear_partial_array_mask(p); 4070 assert(old->is_objArray(), "must be obj array"); 4071 assert(old->is_forwarded(), "must be forwarded"); 4072 assert(Universe::heap()->is_in_reserved(old), "must be in heap."); 4073 4074 objArrayOop obj = objArrayOop(old->forwardee()); 4075 assert((void*)old != (void*)old->forwardee(), "self forwarding here?"); 4076 // Process ParGCArrayScanChunk elements now 4077 // and push the remainder back onto queue 4078 int start = arrayOop(old)->length(); 4079 int end = obj->length(); 4080 int remainder = end - start; 4081 assert(start <= end, "just checking"); 4082 if (remainder > 2 * ParGCArrayScanChunk) { 4083 // Test above combines last partial chunk with a full chunk 4084 end = start + ParGCArrayScanChunk; 4085 arrayOop(old)->set_length(end); 4086 // Push remainder. 4087 oop* old_p = set_partial_array_mask(old); 4088 assert(arrayOop(old)->length() < obj->length(), "Empty push?"); 4089 _par_scan_state->push_on_queue(old_p); 4090 } else { 4091 // Restore length so that the heap remains parsable in 4092 // case of evacuation failure. 4093 arrayOop(old)->set_length(end); 4094 } 4095 _scanner.set_region(_g1->heap_region_containing_raw(obj)); 4096 // process our set of indices (include header in first chunk) 4097 obj->oop_iterate_range(&_scanner, start, end); 4098 } 4099 4100 class G1ParEvacuateFollowersClosure : public VoidClosure { 4101 protected: 4102 G1CollectedHeap* _g1h; 4103 G1ParScanThreadState* _par_scan_state; 4104 RefToScanQueueSet* _queues; 4105 ParallelTaskTerminator* _terminator; 4106 4107 G1ParScanThreadState* par_scan_state() { return _par_scan_state; } 4108 RefToScanQueueSet* queues() { return _queues; } 4109 ParallelTaskTerminator* terminator() { return _terminator; } 4110 4111 public: 4112 G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h, 4113 G1ParScanThreadState* par_scan_state, 4114 RefToScanQueueSet* queues, 4115 ParallelTaskTerminator* terminator) 4116 : _g1h(g1h), _par_scan_state(par_scan_state), 4117 _queues(queues), _terminator(terminator) {} 4118 4119 void do_void(); 4120 4121 private: 4122 inline bool offer_termination(); 4123 }; 4124 4125 bool G1ParEvacuateFollowersClosure::offer_termination() { 4126 G1ParScanThreadState* const pss = par_scan_state(); 4127 pss->start_term_time(); 4128 const bool res = terminator()->offer_termination(); 4129 pss->end_term_time(); 4130 return res; 4131 } 4132 4133 void G1ParEvacuateFollowersClosure::do_void() { 4134 StarTask stolen_task; 4135 G1ParScanThreadState* const pss = par_scan_state(); 4136 pss->trim_queue(); 4137 4138 do { 4139 while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) { 4140 assert(pss->verify_task(stolen_task), "sanity"); 4141 if (stolen_task.is_narrow()) { 4142 pss->push_on_queue((narrowOop*) stolen_task); 4143 } else { 4144 pss->push_on_queue((oop*) stolen_task); 4145 } 4146 pss->trim_queue(); 
4147 } 4148 } while (!offer_termination()); 4149 4150 pss->retire_alloc_buffers(); 4151 } 4152 4153 class G1ParTask : public AbstractGangTask { 4154 protected: 4155 G1CollectedHeap* _g1h; 4156 RefToScanQueueSet *_queues; 4157 ParallelTaskTerminator _terminator; 4158 int _n_workers; 4159 4160 Mutex _stats_lock; 4161 Mutex* stats_lock() { return &_stats_lock; } 4162 4163 size_t getNCards() { 4164 return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1) 4165 / G1BlockOffsetSharedArray::N_bytes; 4166 } 4167 4168 public: 4169 G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues) 4170 : AbstractGangTask("G1 collection"), 4171 _g1h(g1h), 4172 _queues(task_queues), 4173 _terminator(workers, _queues), 4174 _stats_lock(Mutex::leaf, "parallel G1 stats lock", true), 4175 _n_workers(workers) 4176 {} 4177 4178 RefToScanQueueSet* queues() { return _queues; } 4179 4180 RefToScanQueue *work_queue(int i) { 4181 return queues()->queue(i); 4182 } 4183 4184 void work(int i) { 4185 if (i >= _n_workers) return; // no work needed this round 4186 4187 double start_time_ms = os::elapsedTime() * 1000.0; 4188 _g1h->g1_policy()->record_gc_worker_start_time(i, start_time_ms); 4189 4190 ResourceMark rm; 4191 HandleMark hm; 4192 4193 G1ParScanThreadState pss(_g1h, i); 4194 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss); 4195 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss); 4196 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss); 4197 4198 pss.set_evac_closure(&scan_evac_cl); 4199 pss.set_evac_failure_closure(&evac_failure_cl); 4200 pss.set_partial_scan_closure(&partial_scan_cl); 4201 4202 G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss); 4203 G1ParScanPermClosure only_scan_perm_cl(_g1h, &pss); 4204 G1ParScanHeapRSClosure only_scan_heap_rs_cl(_g1h, &pss); 4205 G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss); 4206 4207 G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss); 4208 G1ParScanAndMarkPermClosure scan_mark_perm_cl(_g1h, &pss); 4209 G1ParScanAndMarkHeapRSClosure scan_mark_heap_rs_cl(_g1h, &pss); 4210 4211 OopsInHeapRegionClosure *scan_root_cl; 4212 OopsInHeapRegionClosure *scan_perm_cl; 4213 4214 if (_g1h->g1_policy()->during_initial_mark_pause()) { 4215 scan_root_cl = &scan_mark_root_cl; 4216 scan_perm_cl = &scan_mark_perm_cl; 4217 } else { 4218 scan_root_cl = &only_scan_root_cl; 4219 scan_perm_cl = &only_scan_perm_cl; 4220 } 4221 4222 pss.start_strong_roots(); 4223 _g1h->g1_process_strong_roots(/* not collecting perm */ false, 4224 SharedHeap::SO_AllClasses, 4225 scan_root_cl, 4226 &push_heap_rs_cl, 4227 scan_perm_cl, 4228 i); 4229 pss.end_strong_roots(); 4230 { 4231 double start = os::elapsedTime(); 4232 G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator); 4233 evac.do_void(); 4234 double elapsed_ms = (os::elapsedTime()-start)*1000.0; 4235 double term_ms = pss.term_time()*1000.0; 4236 _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms); 4237 _g1h->g1_policy()->record_termination(i, term_ms, pss.term_attempts()); 4238 } 4239 _g1h->g1_policy()->record_thread_age_table(pss.age_table()); 4240 _g1h->update_surviving_young_words(pss.surviving_young_words()+1); 4241 4242 // Clean up any par-expanded rem sets. 
4243 HeapRegionRemSet::par_cleanup(); 4244 4245 if (ParallelGCVerbose) { 4246 MutexLocker x(stats_lock()); 4247 pss.print_termination_stats(i); 4248 } 4249 4250 assert(pss.refs()->is_empty(), "should be empty"); 4251 double end_time_ms = os::elapsedTime() * 1000.0; 4252 _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms); 4253 } 4254 }; 4255 4256 // *** Common G1 Evacuation Stuff 4257 4258 // This method is run in a GC worker. 4259 4260 void 4261 G1CollectedHeap:: 4262 g1_process_strong_roots(bool collecting_perm_gen, 4263 SharedHeap::ScanningOption so, 4264 OopClosure* scan_non_heap_roots, 4265 OopsInHeapRegionClosure* scan_rs, 4266 OopsInGenClosure* scan_perm, 4267 int worker_i) { 4268 // First scan the strong roots, including the perm gen. 4269 double ext_roots_start = os::elapsedTime(); 4270 double closure_app_time_sec = 0.0; 4271 4272 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots); 4273 BufferingOopsInGenClosure buf_scan_perm(scan_perm); 4274 buf_scan_perm.set_generation(perm_gen()); 4275 4276 // Walk the code cache w/o buffering, because StarTask cannot handle 4277 // unaligned oop locations. 4278 CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, /*do_marking=*/ true); 4279 4280 process_strong_roots(false, // no scoping; this is parallel code 4281 collecting_perm_gen, so, 4282 &buf_scan_non_heap_roots, 4283 &eager_scan_code_roots, 4284 &buf_scan_perm); 4285 4286 // Finish up any enqueued closure apps. 4287 buf_scan_non_heap_roots.done(); 4288 buf_scan_perm.done(); 4289 double ext_roots_end = os::elapsedTime(); 4290 g1_policy()->reset_obj_copy_time(worker_i); 4291 double obj_copy_time_sec = 4292 buf_scan_non_heap_roots.closure_app_seconds() + 4293 buf_scan_perm.closure_app_seconds(); 4294 g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0); 4295 double ext_root_time_ms = 4296 ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0; 4297 g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms); 4298 4299 // Scan strong roots in mark stack. 4300 if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) { 4301 concurrent_mark()->oops_do(scan_non_heap_roots); 4302 } 4303 double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0; 4304 g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms); 4305 4306 // XXX What should this be doing in the parallel case? 4307 g1_policy()->record_collection_pause_end_CH_strong_roots(); 4308 // Now scan the complement of the collection set. 4309 if (scan_rs != NULL) { 4310 g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i); 4311 } 4312 // Finish with the ref_processor roots. 
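// Only the worker that successfully claims this subtask scans the
// reference processor roots, so they are processed once per pause.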
4313 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) { 4314 ref_processor()->oops_do(scan_non_heap_roots); 4315 } 4316 g1_policy()->record_collection_pause_end_G1_strong_roots(); 4317 _process_strong_tasks->all_tasks_completed(); 4318 } 4319 4320 void 4321 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure, 4322 OopClosure* non_root_closure) { 4323 CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false); 4324 SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure); 4325 } 4326 4327 4328 class SaveMarksClosure: public HeapRegionClosure { 4329 public: 4330 bool doHeapRegion(HeapRegion* r) { 4331 r->save_marks(); 4332 return false; 4333 } 4334 }; 4335 4336 void G1CollectedHeap::save_marks() { 4337 if (!CollectedHeap::use_parallel_gc_threads()) { 4338 SaveMarksClosure sm; 4339 heap_region_iterate(&sm); 4340 } 4341 // We do this even in the parallel case 4342 perm_gen()->save_marks(); 4343 } 4344 4345 void G1CollectedHeap::evacuate_collection_set() { 4346 set_evacuation_failed(false); 4347 4348 g1_rem_set()->prepare_for_oops_into_collection_set_do(); 4349 concurrent_g1_refine()->set_use_cache(false); 4350 concurrent_g1_refine()->clear_hot_cache_claimed_index(); 4351 4352 int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1); 4353 set_par_threads(n_workers); 4354 G1ParTask g1_par_task(this, n_workers, _task_queues); 4355 4356 init_for_evac_failure(NULL); 4357 4358 rem_set()->prepare_for_younger_refs_iterate(true); 4359 4360 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty"); 4361 double start_par = os::elapsedTime(); 4362 if (G1CollectedHeap::use_parallel_gc_threads()) { 4363 // The individual threads will set their evac-failure closures. 4364 StrongRootsScope srs(this); 4365 if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr(); 4366 workers()->run_task(&g1_par_task); 4367 } else { 4368 StrongRootsScope srs(this); 4369 g1_par_task.work(0); 4370 } 4371 4372 double par_time = (os::elapsedTime() - start_par) * 1000.0; 4373 g1_policy()->record_par_time(par_time); 4374 set_par_threads(0); 4375 // Is this the right thing to do here? We don't save marks 4376 // on individual heap regions when we allocate from 4377 // them in parallel, so this seems like the correct place for this. 4378 retire_all_alloc_regions(); 4379 { 4380 G1IsAliveClosure is_alive(this); 4381 G1KeepAliveClosure keep_alive(this); 4382 JNIHandles::weak_oops_do(&is_alive, &keep_alive); 4383 } 4384 release_gc_alloc_regions(false /* totally */); 4385 g1_rem_set()->cleanup_after_oops_into_collection_set_do(); 4386 4387 concurrent_g1_refine()->clear_hot_cache(); 4388 concurrent_g1_refine()->set_use_cache(true); 4389 4390 finalize_for_evac_failure(); 4391 4392 // Must do this before removing self-forwarding pointers, which clears 4393 // the per-region evac-failure flags. 
4394 concurrent_mark()->complete_marking_in_collection_set(); 4395 4396 if (evacuation_failed()) { 4397 remove_self_forwarding_pointers(); 4398 if (PrintGCDetails) { 4399 gclog_or_tty->print(" (to-space overflow)"); 4400 } else if (PrintGC) { 4401 gclog_or_tty->print("--"); 4402 } 4403 } 4404 4405 if (G1DeferredRSUpdate) { 4406 RedirtyLoggedCardTableEntryFastClosure redirty; 4407 dirty_card_queue_set().set_closure(&redirty); 4408 dirty_card_queue_set().apply_closure_to_all_completed_buffers(); 4409 4410 DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set(); 4411 dcq.merge_bufferlists(&dirty_card_queue_set()); 4412 assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed"); 4413 } 4414 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); 4415 } 4416 4417 void G1CollectedHeap::free_region(HeapRegion* hr) { 4418 size_t pre_used = 0; 4419 size_t cleared_h_regions = 0; 4420 size_t freed_regions = 0; 4421 UncleanRegionList local_list; 4422 4423 HeapWord* start = hr->bottom(); 4424 HeapWord* end = hr->prev_top_at_mark_start(); 4425 size_t used_bytes = hr->used(); 4426 size_t live_bytes = hr->max_live_bytes(); 4427 if (used_bytes > 0) { 4428 guarantee( live_bytes <= used_bytes, "invariant" ); 4429 } else { 4430 guarantee( live_bytes == 0, "invariant" ); 4431 } 4432 4433 size_t garbage_bytes = used_bytes - live_bytes; 4434 if (garbage_bytes > 0) 4435 g1_policy()->decrease_known_garbage_bytes(garbage_bytes); 4436 4437 free_region_work(hr, pre_used, cleared_h_regions, freed_regions, 4438 &local_list); 4439 finish_free_region_work(pre_used, cleared_h_regions, freed_regions, 4440 &local_list); 4441 } 4442 4443 void 4444 G1CollectedHeap::free_region_work(HeapRegion* hr, 4445 size_t& pre_used, 4446 size_t& cleared_h_regions, 4447 size_t& freed_regions, 4448 UncleanRegionList* list, 4449 bool par) { 4450 pre_used += hr->used(); 4451 if (hr->isHumongous()) { 4452 assert(hr->startsHumongous(), 4453 "Only the start of a humongous region should be freed."); 4454 int ind = _hrs->find(hr); 4455 assert(ind != -1, "Should have an index."); 4456 // Clear the start region. 4457 hr->hr_clear(par, true /*clear_space*/); 4458 list->insert_before_head(hr); 4459 cleared_h_regions++; 4460 freed_regions++; 4461 // Clear any continued regions. 4462 ind++; 4463 while ((size_t)ind < n_regions()) { 4464 HeapRegion* hrc = _hrs->at(ind); 4465 if (!hrc->continuesHumongous()) break; 4466 // Otherwise, does continue the H region. 4467 assert(hrc->humongous_start_region() == hr, "Huh?"); 4468 hrc->hr_clear(par, true /*clear_space*/); 4469 cleared_h_regions++; 4470 freed_regions++; 4471 list->insert_before_head(hrc); 4472 ind++; 4473 } 4474 } else { 4475 hr->hr_clear(par, true /*clear_space*/); 4476 list->insert_before_head(hr); 4477 freed_regions++; 4478 // If we're using clear2, this should not be enabled. 4479 // assert(!hr->in_cohort(), "Can't be both free and in a cohort."); 4480 } 4481 } 4482 4483 void G1CollectedHeap::finish_free_region_work(size_t pre_used, 4484 size_t cleared_h_regions, 4485 size_t freed_regions, 4486 UncleanRegionList* list) { 4487 if (list != NULL && list->sz() > 0) { 4488 prepend_region_list_on_unclean_list(list); 4489 } 4490 // Acquire a lock, if we're parallel, to update possibly-shared 4491 // variables. 4492 Mutex* lock = (n_par_threads() > 0) ? 
ParGCRareEvent_lock : NULL; 4493 { 4494 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag); 4495 _summary_bytes_used -= pre_used; 4496 _num_humongous_regions -= (int) cleared_h_regions; 4497 _free_regions += freed_regions; 4498 } 4499 } 4500 4501 4502 void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) { 4503 while (list != NULL) { 4504 guarantee( list->is_young(), "invariant" ); 4505 4506 HeapWord* bottom = list->bottom(); 4507 HeapWord* end = list->end(); 4508 MemRegion mr(bottom, end); 4509 ct_bs->dirty(mr); 4510 4511 list = list->get_next_young_region(); 4512 } 4513 } 4514 4515 4516 class G1ParCleanupCTTask : public AbstractGangTask { 4517 CardTableModRefBS* _ct_bs; 4518 G1CollectedHeap* _g1h; 4519 HeapRegion* volatile _su_head; 4520 public: 4521 G1ParCleanupCTTask(CardTableModRefBS* ct_bs, 4522 G1CollectedHeap* g1h, 4523 HeapRegion* survivor_list) : 4524 AbstractGangTask("G1 Par Cleanup CT Task"), 4525 _ct_bs(ct_bs), 4526 _g1h(g1h), 4527 _su_head(survivor_list) 4528 { } 4529 4530 void work(int i) { 4531 HeapRegion* r; 4532 while (r = _g1h->pop_dirty_cards_region()) { 4533 clear_cards(r); 4534 } 4535 // Redirty the cards of the survivor regions. 4536 dirty_list(&this->_su_head); 4537 } 4538 4539 void clear_cards(HeapRegion* r) { 4540 // Cards for Survivor regions will be dirtied later. 4541 if (!r->is_survivor()) { 4542 _ct_bs->clear(MemRegion(r->bottom(), r->end())); 4543 } 4544 } 4545 4546 void dirty_list(HeapRegion* volatile * head_ptr) { 4547 HeapRegion* head; 4548 do { 4549 // Pop region off the list. 4550 head = *head_ptr; 4551 if (head != NULL) { 4552 HeapRegion* r = (HeapRegion*) 4553 Atomic::cmpxchg_ptr(head->get_next_young_region(), head_ptr, head); 4554 if (r == head) { 4555 assert(!r->isHumongous(), "Humongous regions shouldn't be on survivor list"); 4556 _ct_bs->dirty(MemRegion(r->bottom(), r->end())); 4557 } 4558 } 4559 } while (*head_ptr != NULL); 4560 } 4561 }; 4562 4563 4564 #ifndef PRODUCT 4565 class G1VerifyCardTableCleanup: public HeapRegionClosure { 4566 CardTableModRefBS* _ct_bs; 4567 public: 4568 G1VerifyCardTableCleanup(CardTableModRefBS* ct_bs) 4569 : _ct_bs(ct_bs) 4570 { } 4571 virtual bool doHeapRegion(HeapRegion* r) 4572 { 4573 MemRegion mr(r->bottom(), r->end()); 4574 if (r->is_survivor()) { 4575 _ct_bs->verify_dirty_region(mr); 4576 } else { 4577 _ct_bs->verify_clean_region(mr); 4578 } 4579 return false; 4580 } 4581 }; 4582 #endif 4583 4584 void G1CollectedHeap::cleanUpCardTable() { 4585 CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set()); 4586 double start = os::elapsedTime(); 4587 4588 // Iterate over the dirty cards region list. 4589 G1ParCleanupCTTask cleanup_task(ct_bs, this, 4590 _young_list->first_survivor_region()); 4591 4592 if (ParallelGCThreads > 0) { 4593 set_par_threads(workers()->total_workers()); 4594 workers()->run_task(&cleanup_task); 4595 set_par_threads(0); 4596 } else { 4597 while (_dirty_cards_region_list) { 4598 HeapRegion* r = _dirty_cards_region_list; 4599 cleanup_task.clear_cards(r); 4600 _dirty_cards_region_list = r->get_next_dirty_cards_region(); 4601 if (_dirty_cards_region_list == r) { 4602 // The last region. 
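// (The tail of the dirty-cards region list links back to itself, which is
// why next == r identifies the end of the list.)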
4603 _dirty_cards_region_list = NULL;
4604 }
4605 r->set_next_dirty_cards_region(NULL);
4606 }
4607 // now, redirty the cards of the survivor regions
4608 // (it seemed faster to do it this way, instead of iterating over
4609 // all regions and then clearing / dirtying as appropriate)
4610 dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region());
4611 }
4612 
4613 double elapsed = os::elapsedTime() - start;
4614 g1_policy()->record_clear_ct_time( elapsed * 1000.0);
4615 #ifndef PRODUCT
4616 if (G1VerifyCTCleanup || VerifyAfterGC) {
4617 G1VerifyCardTableCleanup cleanup_verifier(ct_bs);
4618 heap_region_iterate(&cleanup_verifier);
4619 }
4620 #endif
4621 }
4622 
4623 void G1CollectedHeap::do_collection_pause_if_appropriate(size_t word_size) {
4624 if (g1_policy()->should_do_collection_pause(word_size)) {
4625 do_collection_pause();
4626 }
4627 }
4628 
4629 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
4630 double young_time_ms = 0.0;
4631 double non_young_time_ms = 0.0;
4632 
4633 // Since the collection set is a superset of the young list,
4634 // all we need to do to clear the young list is clear its
4635 // head and length, and unlink any young regions in the code below
4636 _young_list->clear();
4637 
4638 G1CollectorPolicy* policy = g1_policy();
4639 
4640 double start_sec = os::elapsedTime();
4641 bool non_young = true;
4642 
4643 HeapRegion* cur = cs_head;
4644 int age_bound = -1;
4645 size_t rs_lengths = 0;
4646 
4647 while (cur != NULL) {
4648 if (non_young) {
4649 if (cur->is_young()) {
4650 double end_sec = os::elapsedTime();
4651 double elapsed_ms = (end_sec - start_sec) * 1000.0;
4652 non_young_time_ms += elapsed_ms;
4653 
4654 start_sec = os::elapsedTime();
4655 non_young = false;
4656 }
4657 } else {
4658 if (!cur->is_on_free_list()) {
4659 double end_sec = os::elapsedTime();
4660 double elapsed_ms = (end_sec - start_sec) * 1000.0;
4661 young_time_ms += elapsed_ms;
4662 
4663 start_sec = os::elapsedTime();
4664 non_young = true;
4665 }
4666 }
4667 
4668 rs_lengths += cur->rem_set()->occupied();
4669 
4670 HeapRegion* next = cur->next_in_collection_set();
4671 assert(cur->in_collection_set(), "bad CS");
4672 cur->set_next_in_collection_set(NULL);
4673 cur->set_in_collection_set(false);
4674 
4675 if (cur->is_young()) {
4676 int index = cur->young_index_in_cset();
4677 guarantee( index != -1, "invariant" );
4678 guarantee( (size_t)index < policy->young_cset_length(), "invariant" );
4679 size_t words_survived = _surviving_young_words[index];
4680 cur->record_surv_words_in_group(words_survived);
4681 
4682 // At this point we have 'popped' cur from the collection set
4683 // (linked via next_in_collection_set()) but it is still in the
4684 // young list (linked via next_young_region()). Clear the
4685 // _next_young_region field.
4686 cur->set_next_young_region(NULL);
4687 } else {
4688 int index = cur->young_index_in_cset();
4689 guarantee( index == -1, "invariant" );
4690 }
4691 
4692 assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
4693 (!cur->is_young() && cur->young_index_in_cset() == -1),
4694 "invariant" );
4695 
4696 if (!cur->evacuation_failed()) {
4697 // Evacuation succeeded, so the region holds no live objects and can be freed.
4698 assert(!cur->is_empty(), 4699 "Should not have empty regions in a CS."); 4700 free_region(cur); 4701 } else { 4702 cur->uninstall_surv_rate_group(); 4703 if (cur->is_young()) 4704 cur->set_young_index_in_cset(-1); 4705 cur->set_not_young(); 4706 cur->set_evacuation_failed(false); 4707 } 4708 cur = next; 4709 } 4710 4711 policy->record_max_rs_lengths(rs_lengths); 4712 policy->cset_regions_freed(); 4713 4714 double end_sec = os::elapsedTime(); 4715 double elapsed_ms = (end_sec - start_sec) * 1000.0; 4716 if (non_young) 4717 non_young_time_ms += elapsed_ms; 4718 else 4719 young_time_ms += elapsed_ms; 4720 4721 policy->record_young_free_cset_time_ms(young_time_ms); 4722 policy->record_non_young_free_cset_time_ms(non_young_time_ms); 4723 } 4724 4725 // This routine is similar to the above but does not record 4726 // any policy statistics or update free lists; we are abandoning 4727 // the current incremental collection set in preparation of a 4728 // full collection. After the full GC we will start to build up 4729 // the incremental collection set again. 4730 // This is only called when we're doing a full collection 4731 // and is immediately followed by the tearing down of the young list. 4732 4733 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) { 4734 HeapRegion* cur = cs_head; 4735 4736 while (cur != NULL) { 4737 HeapRegion* next = cur->next_in_collection_set(); 4738 assert(cur->in_collection_set(), "bad CS"); 4739 cur->set_next_in_collection_set(NULL); 4740 cur->set_in_collection_set(false); 4741 cur->set_young_index_in_cset(-1); 4742 cur = next; 4743 } 4744 } 4745 4746 HeapRegion* 4747 G1CollectedHeap::alloc_region_from_unclean_list_locked(bool zero_filled) { 4748 assert(ZF_mon->owned_by_self(), "Precondition"); 4749 HeapRegion* res = pop_unclean_region_list_locked(); 4750 if (res != NULL) { 4751 assert(!res->continuesHumongous() && 4752 res->zero_fill_state() != HeapRegion::Allocated, 4753 "Only free regions on unclean list."); 4754 if (zero_filled) { 4755 res->ensure_zero_filled_locked(); 4756 res->set_zero_fill_allocated(); 4757 } 4758 } 4759 return res; 4760 } 4761 4762 HeapRegion* G1CollectedHeap::alloc_region_from_unclean_list(bool zero_filled) { 4763 MutexLockerEx zx(ZF_mon, Mutex::_no_safepoint_check_flag); 4764 return alloc_region_from_unclean_list_locked(zero_filled); 4765 } 4766 4767 void G1CollectedHeap::put_region_on_unclean_list(HeapRegion* r) { 4768 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); 4769 put_region_on_unclean_list_locked(r); 4770 if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread. 4771 } 4772 4773 void G1CollectedHeap::set_unclean_regions_coming(bool b) { 4774 MutexLockerEx x(Cleanup_mon); 4775 set_unclean_regions_coming_locked(b); 4776 } 4777 4778 void G1CollectedHeap::set_unclean_regions_coming_locked(bool b) { 4779 assert(Cleanup_mon->owned_by_self(), "Precondition"); 4780 _unclean_regions_coming = b; 4781 // Wake up mutator threads that might be waiting for completeCleanup to 4782 // finish. 
4783 if (!b) Cleanup_mon->notify_all(); 4784 } 4785 4786 void G1CollectedHeap::wait_for_cleanup_complete() { 4787 MutexLockerEx x(Cleanup_mon); 4788 wait_for_cleanup_complete_locked(); 4789 } 4790 4791 void G1CollectedHeap::wait_for_cleanup_complete_locked() { 4792 assert(Cleanup_mon->owned_by_self(), "precondition"); 4793 while (_unclean_regions_coming) { 4794 Cleanup_mon->wait(); 4795 } 4796 } 4797 4798 void 4799 G1CollectedHeap::put_region_on_unclean_list_locked(HeapRegion* r) { 4800 assert(ZF_mon->owned_by_self(), "precondition."); 4801 #ifdef ASSERT 4802 if (r->is_gc_alloc_region()) { 4803 ResourceMark rm; 4804 stringStream region_str; 4805 print_on(®ion_str); 4806 assert(!r->is_gc_alloc_region(), err_msg("Unexpected GC allocation region: %s", 4807 region_str.as_string())); 4808 } 4809 #endif 4810 _unclean_region_list.insert_before_head(r); 4811 } 4812 4813 void 4814 G1CollectedHeap::prepend_region_list_on_unclean_list(UncleanRegionList* list) { 4815 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); 4816 prepend_region_list_on_unclean_list_locked(list); 4817 if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread. 4818 } 4819 4820 void 4821 G1CollectedHeap:: 4822 prepend_region_list_on_unclean_list_locked(UncleanRegionList* list) { 4823 assert(ZF_mon->owned_by_self(), "precondition."); 4824 _unclean_region_list.prepend_list(list); 4825 } 4826 4827 HeapRegion* G1CollectedHeap::pop_unclean_region_list_locked() { 4828 assert(ZF_mon->owned_by_self(), "precondition."); 4829 HeapRegion* res = _unclean_region_list.pop(); 4830 if (res != NULL) { 4831 // Inform ZF thread that there's a new unclean head. 4832 if (_unclean_region_list.hd() != NULL && should_zf()) 4833 ZF_mon->notify_all(); 4834 } 4835 return res; 4836 } 4837 4838 HeapRegion* G1CollectedHeap::peek_unclean_region_list_locked() { 4839 assert(ZF_mon->owned_by_self(), "precondition."); 4840 return _unclean_region_list.hd(); 4841 } 4842 4843 4844 bool G1CollectedHeap::move_cleaned_region_to_free_list_locked() { 4845 assert(ZF_mon->owned_by_self(), "Precondition"); 4846 HeapRegion* r = peek_unclean_region_list_locked(); 4847 if (r != NULL && r->zero_fill_state() == HeapRegion::ZeroFilled) { 4848 // Result of below must be equal to "r", since we hold the lock. 
void G1CollectedHeap::put_free_region_on_list_locked(HeapRegion* r) {
  assert(ZF_mon->owned_by_self(), "precondition.");
  assert(_free_region_list_size == free_region_list_length(), "Inv");
  assert(r->zero_fill_state() == HeapRegion::ZeroFilled,
         "Regions on free list must be zero filled");
  assert(!r->isHumongous(), "Must not be humongous.");
  assert(r->is_empty(), "Better be empty");
  assert(!r->is_on_free_list(),
         "Better not already be on free list");
  assert(!r->is_on_unclean_list(),
         "Better not already be on unclean list");
  r->set_on_free_list(true);
  r->set_next_on_free_list(_free_region_list);
  _free_region_list = r;
  _free_region_list_size++;
  assert(_free_region_list_size == free_region_list_length(), "Inv");
}

void G1CollectedHeap::put_free_region_on_list(HeapRegion* r) {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  put_free_region_on_list_locked(r);
}

HeapRegion* G1CollectedHeap::pop_free_region_list_locked() {
  assert(ZF_mon->owned_by_self(), "precondition.");
  assert(_free_region_list_size == free_region_list_length(), "Inv");
  HeapRegion* res = _free_region_list;
  if (res != NULL) {
    _free_region_list = res->next_from_free_list();
    _free_region_list_size--;
    res->set_on_free_list(false);
    res->set_next_on_free_list(NULL);
    assert(_free_region_list_size == free_region_list_length(), "Inv");
  }
  return res;
}

HeapRegion* G1CollectedHeap::alloc_free_region_from_lists(bool zero_filled) {
  // By self, or on behalf of self.
  assert(Heap_lock->is_locked(), "Precondition");
  HeapRegion* res = NULL;
  bool first = true;
  while (res == NULL) {
    if (zero_filled || !first) {
      MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
      res = pop_free_region_list_locked();
      if (res != NULL) {
        assert(!res->zero_fill_is_allocated(),
               "No allocated regions on free list.");
        res->set_zero_fill_allocated();
      } else if (!first) {
        break;  // We tried both, time to return NULL.
      }
    }

    if (res == NULL) {
      res = alloc_region_from_unclean_list(zero_filled);
    }
    assert(res == NULL ||
           !zero_filled ||
           res->zero_fill_is_allocated(),
           "We must have allocated the region we're returning");
    first = false;
  }
  return res;
}

void G1CollectedHeap::remove_allocated_regions_from_lists() {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  {
    HeapRegion* prev = NULL;
    HeapRegion* cur = _unclean_region_list.hd();
    while (cur != NULL) {
      HeapRegion* next = cur->next_from_unclean_list();
      if (cur->zero_fill_is_allocated()) {
        // Remove from the list.
        if (prev == NULL) {
          (void)_unclean_region_list.pop();
        } else {
          _unclean_region_list.delete_after(prev);
        }
        cur->set_on_unclean_list(false);
        cur->set_next_on_unclean_list(NULL);
      } else {
        prev = cur;
      }
      cur = next;
    }
    assert(_unclean_region_list.sz() == unclean_region_list_length(),
           "Inv");
  }

  {
    HeapRegion* prev = NULL;
    HeapRegion* cur = _free_region_list;
    while (cur != NULL) {
      HeapRegion* next = cur->next_from_free_list();
      if (cur->zero_fill_is_allocated()) {
        // Remove from the list.
        if (prev == NULL) {
          _free_region_list = cur->next_from_free_list();
        } else {
          prev->set_next_on_free_list(cur->next_from_free_list());
        }
        cur->set_on_free_list(false);
        cur->set_next_on_free_list(NULL);
        _free_region_list_size--;
      } else {
        prev = cur;
      }
      cur = next;
    }
    assert(_free_region_list_size == free_region_list_length(), "Inv");
  }
}
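// NOTE: descriptive summary only; it restates what the list code above and
// the verification below already enforce for a region's zero-fill state:
//
//   NotZeroFilled, ZeroFilling : may sit on the unclean list, never on the
//                                free list;
//   ZeroFilled                 : eligible to move from the unclean list to
//                                the free list;
//   Allocated                  : never on the unclean list, and stripped from
//                                both lists by
//                                remove_allocated_regions_from_lists().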
bool G1CollectedHeap::verify_region_lists() {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  return verify_region_lists_locked();
}

bool G1CollectedHeap::verify_region_lists_locked() {
  HeapRegion* unclean = _unclean_region_list.hd();
  while (unclean != NULL) {
    guarantee(unclean->is_on_unclean_list(), "Well, it is!");
    guarantee(!unclean->is_on_free_list(), "Well, it shouldn't be!");
    guarantee(unclean->zero_fill_state() != HeapRegion::Allocated,
              "Everything else is possible.");
    unclean = unclean->next_from_unclean_list();
  }
  guarantee(_unclean_region_list.sz() == unclean_region_list_length(), "Inv");

  HeapRegion* free_r = _free_region_list;
  while (free_r != NULL) {
    assert(free_r->is_on_free_list(), "Well, it is!");
    assert(!free_r->is_on_unclean_list(), "Well, it shouldn't be!");
    switch (free_r->zero_fill_state()) {
    case HeapRegion::NotZeroFilled:
    case HeapRegion::ZeroFilling:
      guarantee(false, "Should not be on free list.");
      break;
    default:
      // Everything else is possible.
      break;
    }
    free_r = free_r->next_from_free_list();
  }
  guarantee(_free_region_list_size == free_region_list_length(), "Inv");
  // If we didn't do an assertion...
  return true;
}

size_t G1CollectedHeap::free_region_list_length() {
  assert(ZF_mon->owned_by_self(), "precondition.");
  size_t len = 0;
  HeapRegion* cur = _free_region_list;
  while (cur != NULL) {
    len++;
    cur = cur->next_from_free_list();
  }
  return len;
}

size_t G1CollectedHeap::unclean_region_list_length() {
  assert(ZF_mon->owned_by_self(), "precondition.");
  return _unclean_region_list.length();
}

size_t G1CollectedHeap::n_regions() {
  return _hrs->length();
}

size_t G1CollectedHeap::max_regions() {
  return
    (size_t)align_size_up(g1_reserved_obj_bytes(), HeapRegion::GrainBytes) /
    HeapRegion::GrainBytes;
}

size_t G1CollectedHeap::free_regions() {
  /* Possibly-expensive assert.
  assert(_free_regions == count_free_regions(),
         "_free_regions is off.");
  */
  return _free_regions;
}
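// should_zf() is the concurrent zero-fill (ZF) thread's wake-up test: while
// the free list holds fewer than G1ConcZFMaxRegions regions, the
// ZF_mon->notify_all() calls above ask the ZF thread to clean more unclean
// regions. (Descriptive comment; the ZF thread itself is defined elsewhere.)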
bool G1CollectedHeap::should_zf() {
  return _free_region_list_size < (size_t) G1ConcZFMaxRegions;
}

class RegionCounter: public HeapRegionClosure {
  size_t _n;
public:
  RegionCounter() : _n(0) {}
  bool doHeapRegion(HeapRegion* r) {
    if (r->is_empty()) {
      assert(!r->isHumongous(), "H regions should not be empty.");
      _n++;
    }
    return false;
  }
  int res() { return (int) _n; }
};

size_t G1CollectedHeap::count_free_regions() {
  RegionCounter rc;
  heap_region_iterate(&rc);
  size_t n = rc.res();
  if (_cur_alloc_region != NULL && _cur_alloc_region->is_empty())
    n--;
  return n;
}

size_t G1CollectedHeap::count_free_regions_list() {
  size_t n = 0;
  size_t o = 0;
  ZF_mon->lock_without_safepoint_check();
  HeapRegion* cur = _free_region_list;
  while (cur != NULL) {
    cur = cur->next_from_free_list();
    n++;
  }
  size_t m = unclean_region_list_length();
  ZF_mon->unlock();
  return n + m;
}

bool G1CollectedHeap::should_set_young_locked() {
  assert(heap_lock_held_for_gc(),
         "the heap lock should already be held by or for this thread");
  return (g1_policy()->in_young_gc_mode() &&
          g1_policy()->should_add_next_region_to_young_list());
}

void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
  assert(heap_lock_held_for_gc(),
         "the heap lock should already be held by or for this thread");
  _young_list->push_region(hr);
  g1_policy()->set_region_short_lived(hr);
}

class NoYoungRegionsClosure: public HeapRegionClosure {
private:
  bool _success;
public:
  NoYoungRegionsClosure() : _success(true) { }
  bool doHeapRegion(HeapRegion* r) {
    if (r->is_young()) {
      gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young",
                             r->bottom(), r->end());
      _success = false;
    }
    return false;
  }
  bool success() { return _success; }
};

bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
  bool ret = _young_list->check_list_empty(check_sample);

  if (check_heap) {
    NoYoungRegionsClosure closure;
    heap_region_iterate(&closure);
    ret = ret && closure.success();
  }

  return ret;
}

void G1CollectedHeap::empty_young_list() {
  assert(heap_lock_held_for_gc(),
         "the heap lock should already be held by or for this thread");
  assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode");

  _young_list->empty_list();
}

bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() {
  bool no_allocs = true;
  for (int ap = 0; ap < GCAllocPurposeCount && no_allocs; ++ap) {
    HeapRegion* r = _gc_alloc_regions[ap];
    no_allocs = r == NULL || r->saved_mark_at_top();
  }
  return no_allocs;
}

void G1CollectedHeap::retire_all_alloc_regions() {
  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
    HeapRegion* r = _gc_alloc_regions[ap];
    if (r != NULL) {
      // Check for aliases.
      bool has_processed_alias = false;
      for (int i = 0; i < ap; ++i) {
        if (_gc_alloc_regions[i] == r) {
          has_processed_alias = true;
          break;
        }
      }
      if (!has_processed_alias) {
        retire_alloc_region(r, false /* par */);
      }
    }
  }
}
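// NOTE: illustrative ordering only, inferred from the "Done at the start /
// end of full GC" comments on the functions below; the actual full-GC driver
// lives elsewhere and is assumed here:
//
//   tear_down_region_lists();              // start of full GC
//   set_used_regions_to_need_zero_fill();  // start of full GC
//   ... full collection does its work ...
//   rebuild_region_lists();                // end of full GC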
// Done at the start of full GC.
void G1CollectedHeap::tear_down_region_lists() {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  while (pop_unclean_region_list_locked() != NULL) ;
  assert(_unclean_region_list.hd() == NULL && _unclean_region_list.sz() == 0,
         "Postconditions of loop.");
  while (pop_free_region_list_locked() != NULL) ;
  assert(_free_region_list == NULL, "Postcondition of loop.");
  if (_free_region_list_size != 0) {
    gclog_or_tty->print_cr("Size is %d.", _free_region_list_size);
    print_on(gclog_or_tty, true /* extended */);
  }
  assert(_free_region_list_size == 0, "Postconditions of loop.");
}

class RegionResetter: public HeapRegionClosure {
  G1CollectedHeap* _g1;
  int _n;
public:
  RegionResetter() : _g1(G1CollectedHeap::heap()), _n(0) {}
  bool doHeapRegion(HeapRegion* r) {
    if (r->continuesHumongous()) return false;
    if (r->top() > r->bottom()) {
      if (r->top() < r->end()) {
        Copy::fill_to_words(r->top(),
                            pointer_delta(r->end(), r->top()));
      }
      r->set_zero_fill_allocated();
    } else {
      assert(r->is_empty(), "tautology");
      _n++;
      switch (r->zero_fill_state()) {
      case HeapRegion::NotZeroFilled:
      case HeapRegion::ZeroFilling:
        _g1->put_region_on_unclean_list_locked(r);
        break;
      case HeapRegion::Allocated:
        r->set_zero_fill_complete();
        // no break; go on to put on free list.
      case HeapRegion::ZeroFilled:
        _g1->put_free_region_on_list_locked(r);
        break;
      }
    }
    return false;
  }

  int getFreeRegionCount() { return _n; }
};

// Done at the end of full GC.
void G1CollectedHeap::rebuild_region_lists() {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  // This needs to go at the end of the full GC.
  RegionResetter rs;
  heap_region_iterate(&rs);
  _free_regions = rs.getFreeRegionCount();
  // Tell the ZF thread it may have work to do.
  if (should_zf()) ZF_mon->notify_all();
}

class UsedRegionsNeedZeroFillSetter: public HeapRegionClosure {
  G1CollectedHeap* _g1;
  int _n;
public:
  UsedRegionsNeedZeroFillSetter() : _g1(G1CollectedHeap::heap()), _n(0) {}
  bool doHeapRegion(HeapRegion* r) {
    if (r->continuesHumongous()) return false;
    if (r->top() > r->bottom()) {
      // There are assertions in "set_zero_fill_needed()" below that
      // require top() == bottom(), so this is technically illegal.
      // We'll skirt the law here, by making that true temporarily.
      DEBUG_ONLY(HeapWord* save_top = r->top();
                 r->set_top(r->bottom()));
      r->set_zero_fill_needed();
      DEBUG_ONLY(r->set_top(save_top));
    }
    return false;
  }
};

// Done at the start of full GC.
void G1CollectedHeap::set_used_regions_to_need_zero_fill() {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  // This needs to go at the start of the full GC.
  UsedRegionsNeedZeroFillSetter rs;
  heap_region_iterate(&rs);
}
void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
  _refine_cte_cl->set_concurrent(concurrent);
}

#ifndef PRODUCT

class PrintHeapRegionClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion *r) {
    gclog_or_tty->print("Region: "PTR_FORMAT":", r);
    if (r != NULL) {
      if (r->is_on_free_list())
        gclog_or_tty->print("Free ");
      if (r->is_young())
        gclog_or_tty->print("Young ");
      if (r->isHumongous())
        gclog_or_tty->print("Is Humongous ");
      r->print();
    }
    return false;
  }
};

class SortHeapRegionClosure : public HeapRegionClosure {
  size_t young_regions, free_regions, unclean_regions;
  size_t hum_regions, count;
  size_t unaccounted, cur_unclean, cur_alloc;
  size_t total_free;
  HeapRegion* cur;
public:
  SortHeapRegionClosure(HeapRegion *_cur) : cur(_cur), young_regions(0),
                                            free_regions(0), unclean_regions(0),
                                            hum_regions(0),
                                            count(0), unaccounted(0),
                                            cur_alloc(0), total_free(0)
  {}
  bool doHeapRegion(HeapRegion *r) {
    count++;
    if (r->is_on_free_list()) free_regions++;
    else if (r->is_on_unclean_list()) unclean_regions++;
    else if (r->isHumongous()) hum_regions++;
    else if (r->is_young()) young_regions++;
    else if (r == cur) cur_alloc++;
    else unaccounted++;
    return false;
  }
  void print() {
    total_free = free_regions + unclean_regions;
    gclog_or_tty->print("%d regions\n", count);
    gclog_or_tty->print("%d free: free_list = %d unclean = %d\n",
                        total_free, free_regions, unclean_regions);
    gclog_or_tty->print("%d humongous %d young\n",
                        hum_regions, young_regions);
    gclog_or_tty->print("%d cur_alloc\n", cur_alloc);
    gclog_or_tty->print("UHOH unaccounted = %d\n", unaccounted);
  }
};

void G1CollectedHeap::print_region_counts() {
  SortHeapRegionClosure sc(_cur_alloc_region);
  PrintHeapRegionClosure cl;
  heap_region_iterate(&cl);
  heap_region_iterate(&sc);
  sc.print();
  print_region_accounting_info();
}

bool G1CollectedHeap::regions_accounted_for() {
  // TODO: regions accounting for young/survivor/tenured
  return true;
}

bool G1CollectedHeap::print_region_accounting_info() {
  gclog_or_tty->print_cr("Free regions: %d (count: %d count list %d) (clean: %d unclean: %d).",
                         free_regions(),
                         count_free_regions(), count_free_regions_list(),
                         _free_region_list_size, _unclean_region_list.sz());
  gclog_or_tty->print_cr("cur_alloc: %d.",
                         (_cur_alloc_region == NULL ? 0 : 1));
  gclog_or_tty->print_cr("H regions: %d.", _num_humongous_regions);

  // TODO: check regions accounting for young/survivor/tenured
  return true;
}

bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
  HeapRegion* hr = heap_region_containing(p);
  if (hr == NULL) {
    return is_in_permanent(p);
  } else {
    return hr->is_in(p);
  }
}
#endif // !PRODUCT

void G1CollectedHeap::g1_unimplemented() {
  // Unimplemented();
}