1 /* 2 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "gc/serial/defNewGeneration.inline.hpp" 27 #include "gc/serial/serialHeap.inline.hpp" 28 #include "gc/serial/tenuredGeneration.hpp" 29 #include "gc/shared/adaptiveSizePolicy.hpp" 30 #include "gc/shared/ageTable.inline.hpp" 31 #include "gc/shared/cardTableRS.hpp" 32 #include "gc/shared/collectorCounters.hpp" 33 #include "gc/shared/gcArguments.hpp" 34 #include "gc/shared/gcHeapSummary.hpp" 35 #include "gc/shared/gcLocker.hpp" 36 #include "gc/shared/gcPolicyCounters.hpp" 37 #include "gc/shared/gcTimer.hpp" 38 #include "gc/shared/gcTrace.hpp" 39 #include "gc/shared/gcTraceTime.inline.hpp" 40 #include "gc/shared/genOopClosures.inline.hpp" 41 #include "gc/shared/generationSpec.hpp" 42 #include "gc/shared/preservedMarks.inline.hpp" 43 #include "gc/shared/referencePolicy.hpp" 44 #include "gc/shared/referenceProcessorPhaseTimes.hpp" 45 #include "gc/shared/space.inline.hpp" 46 #include "gc/shared/spaceDecorator.hpp" 47 #include "gc/shared/strongRootsScope.hpp" 48 #include "gc/shared/weakProcessor.hpp" 49 #include "logging/log.hpp" 50 #include "memory/iterator.inline.hpp" 51 #include "memory/resourceArea.hpp" 52 #include "oops/instanceRefKlass.hpp" 53 #include "oops/oop.inline.hpp" 54 #include "runtime/atomic.hpp" 55 #include "runtime/java.hpp" 56 #include "runtime/mutexLocker.inline.hpp" 57 #include "runtime/prefetch.inline.hpp" 58 #include "runtime/thread.inline.hpp" 59 #include "utilities/align.hpp" 60 #include "utilities/copy.hpp" 61 #include "utilities/globalDefinitions.hpp" 62 #include "utilities/stack.inline.hpp" 63 64 // 65 // DefNewGeneration functions. 66 67 // Methods of protected closure types. 68 69 DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* young_gen) : _young_gen(young_gen) { 70 assert(_young_gen->kind() == Generation::ParNew || 71 _young_gen->kind() == Generation::DefNew, "Expected the young generation here"); 72 } 73 74 bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) { 75 return (HeapWord*)p >= _young_gen->reserved().end() || p->is_forwarded(); 76 } 77 78 DefNewGeneration::KeepAliveClosure:: 79 KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) { 80 _rs = GenCollectedHeap::heap()->rem_set(); 81 } 82 83 void DefNewGeneration::KeepAliveClosure::do_oop(oop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); } 84 void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); } 85 86 87 DefNewGeneration::FastKeepAliveClosure:: 88 FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) : 89 DefNewGeneration::KeepAliveClosure(cl) { 90 _boundary = g->reserved().end(); 91 } 92 93 void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); } 94 void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); } 95 96 DefNewGeneration::FastEvacuateFollowersClosure:: 97 FastEvacuateFollowersClosure(SerialHeap* heap, 98 FastScanClosure* cur, 99 FastScanClosure* older) : 100 _heap(heap), _scan_cur_or_nonheap(cur), _scan_older(older) 101 { 102 } 103 104 void DefNewGeneration::FastEvacuateFollowersClosure::do_void() { 105 do { 106 _heap->oop_since_save_marks_iterate(_scan_cur_or_nonheap, _scan_older); 107 } while (!_heap->no_allocs_since_save_marks()); 108 guarantee(_heap->young_gen()->promo_failure_scan_is_complete(), "Failed to finish scan"); 109 } 110 111 ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) : 112 OopsInClassLoaderDataOrGenClosure(g), _g(g), _gc_barrier(gc_barrier) 113 { 114 _boundary = _g->reserved().end(); 115 } 116 117 FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) : 118 OopsInClassLoaderDataOrGenClosure(g), _g(g), _gc_barrier(gc_barrier) 119 { 120 _boundary = _g->reserved().end(); 121 } 122 123 void CLDScanClosure::do_cld(ClassLoaderData* cld) { 124 NOT_PRODUCT(ResourceMark rm); 125 log_develop_trace(gc, scavenge)("CLDScanClosure::do_cld " PTR_FORMAT ", %s, dirty: %s", 126 p2i(cld), 127 cld->loader_name_and_id(), 128 cld->has_modified_oops() ? "true" : "false"); 129 130 // If the cld has not been dirtied we know that there's 131 // no references into the young gen and we can skip it. 132 if (cld->has_modified_oops()) { 133 if (_accumulate_modified_oops) { 134 cld->accumulate_modified_oops(); 135 } 136 137 // Tell the closure which CLD is being scanned so that it can be dirtied 138 // if oops are left pointing into the young gen. 139 _scavenge_closure->set_scanned_cld(cld); 140 141 // Clean the cld since we're going to scavenge all the metadata. 142 cld->oops_do(_scavenge_closure, ClassLoaderData::_claim_none, /*clear_modified_oops*/true); 143 144 _scavenge_closure->set_scanned_cld(NULL); 145 } 146 } 147 148 ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) : 149 _g(g) 150 { 151 _boundary = _g->reserved().end(); 152 } 153 154 DefNewGeneration::DefNewGeneration(ReservedSpace rs, 155 size_t initial_size, 156 size_t min_size, 157 size_t max_size, 158 const char* policy) 159 : Generation(rs, initial_size), 160 _preserved_marks_set(false /* in_c_heap */), 161 _promo_failure_drain_in_progress(false), 162 _should_allocate_from_space(false) 163 { 164 MemRegion cmr((HeapWord*)_virtual_space.low(), 165 (HeapWord*)_virtual_space.high()); 166 GenCollectedHeap* gch = GenCollectedHeap::heap(); 167 168 gch->rem_set()->resize_covered_region(cmr); 169 170 _eden_space = new ContiguousSpace(); 171 _from_space = new ContiguousSpace(); 172 _to_space = new ContiguousSpace(); 173 174 if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) { 175 vm_exit_during_initialization("Could not allocate a new gen space"); 176 } 177 178 // Compute the maximum eden and survivor space sizes. These sizes 179 // are computed assuming the entire reserved space is committed. 180 // These values are exported as performance counters. 181 uintx size = _virtual_space.reserved_size(); 182 _max_survivor_size = compute_survivor_size(size, SpaceAlignment); 183 _max_eden_size = size - (2*_max_survivor_size); 184 185 // allocate the performance counters 186 187 // Generation counters -- generation 0, 3 subspaces 188 _gen_counters = new GenerationCounters("new", 0, 3, 189 min_size, max_size, &_virtual_space); 190 _gc_counters = new CollectorCounters(policy, 0); 191 192 _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space, 193 _gen_counters); 194 _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space, 195 _gen_counters); 196 _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space, 197 _gen_counters); 198 199 compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle); 200 update_counters(); 201 _old_gen = NULL; 202 _tenuring_threshold = MaxTenuringThreshold; 203 _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize; 204 205 _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer(); 206 } 207 208 void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size, 209 bool clear_space, 210 bool mangle_space) { 211 // If the spaces are being cleared (only done at heap initialization 212 // currently), the survivor spaces need not be empty. 213 // Otherwise, no care is taken for used areas in the survivor spaces 214 // so check. 215 assert(clear_space || (to()->is_empty() && from()->is_empty()), 216 "Initialization of the survivor spaces assumes these are empty"); 217 218 // Compute sizes 219 uintx size = _virtual_space.committed_size(); 220 uintx survivor_size = compute_survivor_size(size, SpaceAlignment); 221 uintx eden_size = size - (2*survivor_size); 222 assert(eden_size > 0 && survivor_size <= eden_size, "just checking"); 223 224 if (eden_size < minimum_eden_size) { 225 // May happen due to 64Kb rounding, if so adjust eden size back up 226 minimum_eden_size = align_up(minimum_eden_size, SpaceAlignment); 227 uintx maximum_survivor_size = (size - minimum_eden_size) / 2; 228 uintx unaligned_survivor_size = 229 align_down(maximum_survivor_size, SpaceAlignment); 230 survivor_size = MAX2(unaligned_survivor_size, SpaceAlignment); 231 eden_size = size - (2*survivor_size); 232 assert(eden_size > 0 && survivor_size <= eden_size, "just checking"); 233 assert(eden_size >= minimum_eden_size, "just checking"); 234 } 235 236 char *eden_start = _virtual_space.low(); 237 char *from_start = eden_start + eden_size; 238 char *to_start = from_start + survivor_size; 239 char *to_end = to_start + survivor_size; 240 241 assert(to_end == _virtual_space.high(), "just checking"); 242 assert(Space::is_aligned(eden_start), "checking alignment"); 243 assert(Space::is_aligned(from_start), "checking alignment"); 244 assert(Space::is_aligned(to_start), "checking alignment"); 245 246 MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start); 247 MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start); 248 MemRegion toMR ((HeapWord*)to_start, (HeapWord*)to_end); 249 250 // A minimum eden size implies that there is a part of eden that 251 // is being used and that affects the initialization of any 252 // newly formed eden. 253 bool live_in_eden = minimum_eden_size > 0; 254 255 // If not clearing the spaces, do some checking to verify that 256 // the space are already mangled. 257 if (!clear_space) { 258 // Must check mangling before the spaces are reshaped. Otherwise, 259 // the bottom or end of one space may have moved into another 260 // a failure of the check may not correctly indicate which space 261 // is not properly mangled. 262 if (ZapUnusedHeapArea) { 263 HeapWord* limit = (HeapWord*) _virtual_space.high(); 264 eden()->check_mangled_unused_area(limit); 265 from()->check_mangled_unused_area(limit); 266 to()->check_mangled_unused_area(limit); 267 } 268 } 269 270 // Reset the spaces for their new regions. 271 eden()->initialize(edenMR, 272 clear_space && !live_in_eden, 273 SpaceDecorator::Mangle); 274 // If clear_space and live_in_eden, we will not have cleared any 275 // portion of eden above its top. This can cause newly 276 // expanded space not to be mangled if using ZapUnusedHeapArea. 277 // We explicitly do such mangling here. 278 if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) { 279 eden()->mangle_unused_area(); 280 } 281 from()->initialize(fromMR, clear_space, mangle_space); 282 to()->initialize(toMR, clear_space, mangle_space); 283 284 // Set next compaction spaces. 285 eden()->set_next_compaction_space(from()); 286 // The to-space is normally empty before a compaction so need 287 // not be considered. The exception is during promotion 288 // failure handling when to-space can contain live objects. 289 from()->set_next_compaction_space(NULL); 290 } 291 292 void DefNewGeneration::swap_spaces() { 293 ContiguousSpace* s = from(); 294 _from_space = to(); 295 _to_space = s; 296 eden()->set_next_compaction_space(from()); 297 // The to-space is normally empty before a compaction so need 298 // not be considered. The exception is during promotion 299 // failure handling when to-space can contain live objects. 300 from()->set_next_compaction_space(NULL); 301 302 if (UsePerfData) { 303 CSpaceCounters* c = _from_counters; 304 _from_counters = _to_counters; 305 _to_counters = c; 306 } 307 } 308 309 bool DefNewGeneration::expand(size_t bytes) { 310 MutexLocker x(ExpandHeap_lock); 311 HeapWord* prev_high = (HeapWord*) _virtual_space.high(); 312 bool success = _virtual_space.expand_by(bytes); 313 if (success && ZapUnusedHeapArea) { 314 // Mangle newly committed space immediately because it 315 // can be done here more simply that after the new 316 // spaces have been computed. 317 HeapWord* new_high = (HeapWord*) _virtual_space.high(); 318 MemRegion mangle_region(prev_high, new_high); 319 SpaceMangler::mangle_region(mangle_region); 320 } 321 322 // Do not attempt an expand-to-the reserve size. The 323 // request should properly observe the maximum size of 324 // the generation so an expand-to-reserve should be 325 // unnecessary. Also a second call to expand-to-reserve 326 // value potentially can cause an undue expansion. 327 // For example if the first expand fail for unknown reasons, 328 // but the second succeeds and expands the heap to its maximum 329 // value. 330 if (GCLocker::is_active()) { 331 log_debug(gc)("Garbage collection disabled, expanded heap instead"); 332 } 333 334 return success; 335 } 336 337 size_t DefNewGeneration::adjust_for_thread_increase(size_t new_size_candidate, 338 size_t new_size_before, 339 size_t alignment) const { 340 size_t desired_new_size = new_size_before; 341 342 if (NewSizeThreadIncrease > 0) { 343 int threads_count; 344 size_t thread_increase_size = 0; 345 346 // 1. Check an overflow at 'threads_count * NewSizeThreadIncrease'. 347 threads_count = Threads::number_of_non_daemon_threads(); 348 if (threads_count > 0 && NewSizeThreadIncrease <= max_uintx / threads_count) { 349 thread_increase_size = threads_count * NewSizeThreadIncrease; 350 351 // 2. Check an overflow at 'new_size_candidate + thread_increase_size'. 352 if (new_size_candidate <= max_uintx - thread_increase_size) { 353 new_size_candidate += thread_increase_size; 354 355 // 3. Check an overflow at 'align_up'. 356 size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1)); 357 if (new_size_candidate <= aligned_max) { 358 desired_new_size = align_up(new_size_candidate, alignment); 359 } 360 } 361 } 362 } 363 364 return desired_new_size; 365 } 366 367 void DefNewGeneration::compute_new_size() { 368 // This is called after a GC that includes the old generation, so from-space 369 // will normally be empty. 370 // Note that we check both spaces, since if scavenge failed they revert roles. 371 // If not we bail out (otherwise we would have to relocate the objects). 372 if (!from()->is_empty() || !to()->is_empty()) { 373 return; 374 } 375 376 GenCollectedHeap* gch = GenCollectedHeap::heap(); 377 378 size_t old_size = gch->old_gen()->capacity(); 379 size_t new_size_before = _virtual_space.committed_size(); 380 size_t min_new_size = initial_size(); 381 size_t max_new_size = reserved().byte_size(); 382 assert(min_new_size <= new_size_before && 383 new_size_before <= max_new_size, 384 "just checking"); 385 // All space sizes must be multiples of Generation::GenGrain. 386 size_t alignment = Generation::GenGrain; 387 388 int threads_count = 0; 389 size_t thread_increase_size = 0; 390 391 size_t new_size_candidate = old_size / NewRatio; 392 // Compute desired new generation size based on NewRatio and NewSizeThreadIncrease 393 // and reverts to previous value if any overflow happens 394 size_t desired_new_size = adjust_for_thread_increase(new_size_candidate, new_size_before, alignment); 395 396 // Adjust new generation size 397 desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size); 398 assert(desired_new_size <= max_new_size, "just checking"); 399 400 bool changed = false; 401 if (desired_new_size > new_size_before) { 402 size_t change = desired_new_size - new_size_before; 403 assert(change % alignment == 0, "just checking"); 404 if (expand(change)) { 405 changed = true; 406 } 407 // If the heap failed to expand to the desired size, 408 // "changed" will be false. If the expansion failed 409 // (and at this point it was expected to succeed), 410 // ignore the failure (leaving "changed" as false). 411 } 412 if (desired_new_size < new_size_before && eden()->is_empty()) { 413 // bail out of shrinking if objects in eden 414 size_t change = new_size_before - desired_new_size; 415 assert(change % alignment == 0, "just checking"); 416 _virtual_space.shrink_by(change); 417 changed = true; 418 } 419 if (changed) { 420 // The spaces have already been mangled at this point but 421 // may not have been cleared (set top = bottom) and should be. 422 // Mangling was done when the heap was being expanded. 423 compute_space_boundaries(eden()->used(), 424 SpaceDecorator::Clear, 425 SpaceDecorator::DontMangle); 426 MemRegion cmr((HeapWord*)_virtual_space.low(), 427 (HeapWord*)_virtual_space.high()); 428 gch->rem_set()->resize_covered_region(cmr); 429 430 log_debug(gc, ergo, heap)( 431 "New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden=" SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]", 432 new_size_before/K, _virtual_space.committed_size()/K, 433 eden()->capacity()/K, from()->capacity()/K); 434 log_trace(gc, ergo, heap)( 435 " [allowed " SIZE_FORMAT "K extra for %d threads]", 436 thread_increase_size/K, threads_count); 437 } 438 } 439 440 void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl, uint n_threads) { 441 assert(false, "NYI -- are you sure you want to call this?"); 442 } 443 444 445 size_t DefNewGeneration::capacity() const { 446 return eden()->capacity() 447 + from()->capacity(); // to() is only used during scavenge 448 } 449 450 451 size_t DefNewGeneration::used() const { 452 return eden()->used() 453 + from()->used(); // to() is only used during scavenge 454 } 455 456 457 size_t DefNewGeneration::free() const { 458 return eden()->free() 459 + from()->free(); // to() is only used during scavenge 460 } 461 462 size_t DefNewGeneration::max_capacity() const { 463 const size_t reserved_bytes = reserved().byte_size(); 464 return reserved_bytes - compute_survivor_size(reserved_bytes, SpaceAlignment); 465 } 466 467 size_t DefNewGeneration::unsafe_max_alloc_nogc() const { 468 return eden()->free(); 469 } 470 471 size_t DefNewGeneration::capacity_before_gc() const { 472 return eden()->capacity(); 473 } 474 475 size_t DefNewGeneration::contiguous_available() const { 476 return eden()->free(); 477 } 478 479 480 HeapWord* volatile* DefNewGeneration::top_addr() const { return eden()->top_addr(); } 481 HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); } 482 483 void DefNewGeneration::object_iterate(ObjectClosure* blk) { 484 eden()->object_iterate(blk); 485 from()->object_iterate(blk); 486 } 487 488 489 void DefNewGeneration::space_iterate(SpaceClosure* blk, 490 bool usedOnly) { 491 blk->do_space(eden()); 492 blk->do_space(from()); 493 blk->do_space(to()); 494 } 495 496 // The last collection bailed out, we are running out of heap space, 497 // so we try to allocate the from-space, too. 498 HeapWord* DefNewGeneration::allocate_from_space(size_t size) { 499 bool should_try_alloc = should_allocate_from_space() || GCLocker::is_active_and_needs_gc(); 500 501 // If the Heap_lock is not locked by this thread, this will be called 502 // again later with the Heap_lock held. 503 bool do_alloc = should_try_alloc && (Heap_lock->owned_by_self() || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread())); 504 505 HeapWord* result = NULL; 506 if (do_alloc) { 507 result = from()->allocate(size); 508 } 509 510 log_trace(gc, alloc)("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "): will_fail: %s heap_lock: %s free: " SIZE_FORMAT "%s%s returns %s", 511 size, 512 GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ? 513 "true" : "false", 514 Heap_lock->is_locked() ? "locked" : "unlocked", 515 from()->free(), 516 should_try_alloc ? "" : " should_allocate_from_space: NOT", 517 do_alloc ? " Heap_lock is not owned by self" : "", 518 result == NULL ? "NULL" : "object"); 519 520 return result; 521 } 522 523 HeapWord* DefNewGeneration::expand_and_allocate(size_t size, 524 bool is_tlab, 525 bool parallel) { 526 // We don't attempt to expand the young generation (but perhaps we should.) 527 return allocate(size, is_tlab); 528 } 529 530 void DefNewGeneration::adjust_desired_tenuring_threshold() { 531 // Set the desired survivor size to half the real survivor space 532 size_t const survivor_capacity = to()->capacity() / HeapWordSize; 533 size_t const desired_survivor_size = (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100); 534 535 _tenuring_threshold = age_table()->compute_tenuring_threshold(desired_survivor_size); 536 537 if (UsePerfData) { 538 GCPolicyCounters* gc_counters = GenCollectedHeap::heap()->counters(); 539 gc_counters->tenuring_threshold()->set_value(_tenuring_threshold); 540 gc_counters->desired_survivor_size()->set_value(desired_survivor_size * oopSize); 541 } 542 543 age_table()->print_age_table(_tenuring_threshold); 544 } 545 546 void DefNewGeneration::collect(bool full, 547 bool clear_all_soft_refs, 548 size_t size, 549 bool is_tlab) { 550 assert(full || size > 0, "otherwise we don't want to collect"); 551 552 SerialHeap* heap = SerialHeap::heap(); 553 554 _gc_timer->register_gc_start(); 555 DefNewTracer gc_tracer; 556 gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer->gc_start()); 557 558 _old_gen = heap->old_gen(); 559 560 // If the next generation is too full to accommodate promotion 561 // from this generation, pass on collection; let the next generation 562 // do it. 563 if (!collection_attempt_is_safe()) { 564 log_trace(gc)(":: Collection attempt not safe ::"); 565 heap->set_incremental_collection_failed(); // Slight lie: we did not even attempt one 566 return; 567 } 568 assert(to()->is_empty(), "Else not collection_attempt_is_safe"); 569 570 init_assuming_no_promotion_failure(); 571 572 GCTraceTime(Trace, gc, phases) tm("DefNew", NULL, heap->gc_cause()); 573 574 heap->trace_heap_before_gc(&gc_tracer); 575 576 // These can be shared for all code paths 577 IsAliveClosure is_alive(this); 578 ScanWeakRefClosure scan_weak_ref(this); 579 580 age_table()->clear(); 581 to()->clear(SpaceDecorator::Mangle); 582 // The preserved marks should be empty at the start of the GC. 583 _preserved_marks_set.init(1); 584 585 heap->rem_set()->prepare_for_younger_refs_iterate(false); 586 587 assert(heap->no_allocs_since_save_marks(), 588 "save marks have not been newly set."); 589 590 FastScanClosure fsc_with_no_gc_barrier(this, false); 591 FastScanClosure fsc_with_gc_barrier(this, true); 592 593 CLDScanClosure cld_scan_closure(&fsc_with_no_gc_barrier, 594 heap->rem_set()->cld_rem_set()->accumulate_modified_oops()); 595 596 set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier); 597 FastEvacuateFollowersClosure evacuate_followers(heap, 598 &fsc_with_no_gc_barrier, 599 &fsc_with_gc_barrier); 600 601 assert(heap->no_allocs_since_save_marks(), 602 "save marks have not been newly set."); 603 604 { 605 // DefNew needs to run with n_threads == 0, to make sure the serial 606 // version of the card table scanning code is used. 607 // See: CardTableRS::non_clean_card_iterate_possibly_parallel. 608 StrongRootsScope srs(0); 609 610 heap->young_process_roots(&srs, 611 &fsc_with_no_gc_barrier, 612 &fsc_with_gc_barrier, 613 &cld_scan_closure); 614 } 615 616 // "evacuate followers". 617 evacuate_followers.do_void(); 618 619 FastKeepAliveClosure keep_alive(this, &scan_weak_ref); 620 ReferenceProcessor* rp = ref_processor(); 621 rp->setup_policy(clear_all_soft_refs); 622 ReferenceProcessorPhaseTimes pt(_gc_timer, rp->max_num_queues()); 623 const ReferenceProcessorStats& stats = 624 rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers, 625 NULL, &pt); 626 gc_tracer.report_gc_reference_stats(stats); 627 gc_tracer.report_tenuring_threshold(tenuring_threshold()); 628 pt.print_all_references(); 629 630 assert(heap->no_allocs_since_save_marks(), "save marks have not been newly set."); 631 632 WeakProcessor::weak_oops_do(&is_alive, &keep_alive); 633 634 // Verify that the usage of keep_alive didn't copy any objects. 635 assert(heap->no_allocs_since_save_marks(), "save marks have not been newly set."); 636 637 if (!_promotion_failed) { 638 // Swap the survivor spaces. 639 eden()->clear(SpaceDecorator::Mangle); 640 from()->clear(SpaceDecorator::Mangle); 641 if (ZapUnusedHeapArea) { 642 // This is now done here because of the piece-meal mangling which 643 // can check for valid mangling at intermediate points in the 644 // collection(s). When a young collection fails to collect 645 // sufficient space resizing of the young generation can occur 646 // an redistribute the spaces in the young generation. Mangle 647 // here so that unzapped regions don't get distributed to 648 // other spaces. 649 to()->mangle_unused_area(); 650 } 651 swap_spaces(); 652 653 assert(to()->is_empty(), "to space should be empty now"); 654 655 adjust_desired_tenuring_threshold(); 656 657 // A successful scavenge should restart the GC time limit count which is 658 // for full GC's. 659 AdaptiveSizePolicy* size_policy = heap->size_policy(); 660 size_policy->reset_gc_overhead_limit_count(); 661 assert(!heap->incremental_collection_failed(), "Should be clear"); 662 } else { 663 assert(_promo_failure_scan_stack.is_empty(), "post condition"); 664 _promo_failure_scan_stack.clear(true); // Clear cached segments. 665 666 remove_forwarding_pointers(); 667 log_info(gc, promotion)("Promotion failed"); 668 // Add to-space to the list of space to compact 669 // when a promotion failure has occurred. In that 670 // case there can be live objects in to-space 671 // as a result of a partial evacuation of eden 672 // and from-space. 673 swap_spaces(); // For uniformity wrt ParNewGeneration. 674 from()->set_next_compaction_space(to()); 675 heap->set_incremental_collection_failed(); 676 677 // Inform the next generation that a promotion failure occurred. 678 _old_gen->promotion_failure_occurred(); 679 gc_tracer.report_promotion_failed(_promotion_failed_info); 680 681 // Reset the PromotionFailureALot counters. 682 NOT_PRODUCT(heap->reset_promotion_should_fail();) 683 } 684 // We should have processed and cleared all the preserved marks. 685 _preserved_marks_set.reclaim(); 686 // set new iteration safe limit for the survivor spaces 687 from()->set_concurrent_iteration_safe_limit(from()->top()); 688 to()->set_concurrent_iteration_safe_limit(to()->top()); 689 690 // We need to use a monotonically non-decreasing time in ms 691 // or we will see time-warp warnings and os::javaTimeMillis() 692 // does not guarantee monotonicity. 693 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; 694 update_time_of_last_gc(now); 695 696 heap->trace_heap_after_gc(&gc_tracer); 697 698 _gc_timer->register_gc_end(); 699 700 gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions()); 701 } 702 703 void DefNewGeneration::init_assuming_no_promotion_failure() { 704 _promotion_failed = false; 705 _promotion_failed_info.reset(); 706 from()->set_next_compaction_space(NULL); 707 } 708 709 void DefNewGeneration::remove_forwarding_pointers() { 710 RemoveForwardedPointerClosure rspc; 711 eden()->object_iterate(&rspc); 712 from()->object_iterate(&rspc); 713 restore_preserved_marks(); 714 } 715 716 void DefNewGeneration::restore_preserved_marks() { 717 SharedRestorePreservedMarksTaskExecutor task_executor(NULL); 718 _preserved_marks_set.restore(&task_executor); 719 } 720 721 void DefNewGeneration::handle_promotion_failure(oop old) { 722 log_debug(gc, promotion)("Promotion failure size = %d) ", old->size()); 723 724 _promotion_failed = true; 725 _promotion_failed_info.register_copy_failure(old->size()); 726 _preserved_marks_set.get()->push_if_necessary(old, old->mark_raw()); 727 // forward to self 728 old->forward_to(old); 729 730 _promo_failure_scan_stack.push(old); 731 732 if (!_promo_failure_drain_in_progress) { 733 // prevent recursion in copy_to_survivor_space() 734 _promo_failure_drain_in_progress = true; 735 drain_promo_failure_scan_stack(); 736 _promo_failure_drain_in_progress = false; 737 } 738 } 739 740 oop DefNewGeneration::copy_to_survivor_space(oop old) { 741 assert(is_in_reserved(old) && !old->is_forwarded(), 742 "shouldn't be scavenging this oop"); 743 size_t s = old->size(); 744 oop obj = NULL; 745 746 // Try allocating obj in to-space (unless too old) 747 if (old->age() < tenuring_threshold()) { 748 obj = (oop) to()->allocate_aligned(s); 749 } 750 751 // Otherwise try allocating obj tenured 752 if (obj == NULL) { 753 obj = _old_gen->promote(old, s); 754 if (obj == NULL) { 755 handle_promotion_failure(old); 756 return old; 757 } 758 } else { 759 // Prefetch beyond obj 760 const intx interval = PrefetchCopyIntervalInBytes; 761 Prefetch::write(obj, interval); 762 763 // Copy obj 764 Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s); 765 766 // Increment age if obj still in new generation 767 obj->incr_age(); 768 age_table()->add(obj, s); 769 } 770 771 // Done, insert forward pointer to obj in this header 772 old->forward_to(obj); 773 774 return obj; 775 } 776 777 void DefNewGeneration::drain_promo_failure_scan_stack() { 778 while (!_promo_failure_scan_stack.is_empty()) { 779 oop obj = _promo_failure_scan_stack.pop(); 780 obj->oop_iterate(_promo_failure_scan_stack_closure); 781 } 782 } 783 784 void DefNewGeneration::save_marks() { 785 eden()->set_saved_mark(); 786 to()->set_saved_mark(); 787 from()->set_saved_mark(); 788 } 789 790 791 void DefNewGeneration::reset_saved_marks() { 792 eden()->reset_saved_mark(); 793 to()->reset_saved_mark(); 794 from()->reset_saved_mark(); 795 } 796 797 798 bool DefNewGeneration::no_allocs_since_save_marks() { 799 assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden"); 800 assert(from()->saved_mark_at_top(), "Violated spec - alloc in from"); 801 return to()->saved_mark_at_top(); 802 } 803 804 void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor, 805 size_t max_alloc_words) { 806 if (requestor == this || _promotion_failed) { 807 return; 808 } 809 assert(GenCollectedHeap::heap()->is_old_gen(requestor), "We should not call our own generation"); 810 811 /* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate. 812 if (to_space->top() > to_space->bottom()) { 813 trace("to_space not empty when contribute_scratch called"); 814 } 815 */ 816 817 ContiguousSpace* to_space = to(); 818 assert(to_space->end() >= to_space->top(), "pointers out of order"); 819 size_t free_words = pointer_delta(to_space->end(), to_space->top()); 820 if (free_words >= MinFreeScratchWords) { 821 ScratchBlock* sb = (ScratchBlock*)to_space->top(); 822 sb->num_words = free_words; 823 sb->next = list; 824 list = sb; 825 } 826 } 827 828 void DefNewGeneration::reset_scratch() { 829 // If contributing scratch in to_space, mangle all of 830 // to_space if ZapUnusedHeapArea. This is needed because 831 // top is not maintained while using to-space as scratch. 832 if (ZapUnusedHeapArea) { 833 to()->mangle_unused_area_complete(); 834 } 835 } 836 837 bool DefNewGeneration::collection_attempt_is_safe() { 838 if (!to()->is_empty()) { 839 log_trace(gc)(":: to is not empty ::"); 840 return false; 841 } 842 if (_old_gen == NULL) { 843 GenCollectedHeap* gch = GenCollectedHeap::heap(); 844 _old_gen = gch->old_gen(); 845 } 846 return _old_gen->promotion_attempt_is_safe(used()); 847 } 848 849 void DefNewGeneration::gc_epilogue(bool full) { 850 DEBUG_ONLY(static bool seen_incremental_collection_failed = false;) 851 852 assert(!GCLocker::is_active(), "We should not be executing here"); 853 // Check if the heap is approaching full after a collection has 854 // been done. Generally the young generation is empty at 855 // a minimum at the end of a collection. If it is not, then 856 // the heap is approaching full. 857 GenCollectedHeap* gch = GenCollectedHeap::heap(); 858 if (full) { 859 DEBUG_ONLY(seen_incremental_collection_failed = false;) 860 if (!collection_attempt_is_safe() && !_eden_space->is_empty()) { 861 log_trace(gc)("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen", 862 GCCause::to_string(gch->gc_cause())); 863 gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state 864 set_should_allocate_from_space(); // we seem to be running out of space 865 } else { 866 log_trace(gc)("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen", 867 GCCause::to_string(gch->gc_cause())); 868 gch->clear_incremental_collection_failed(); // We just did a full collection 869 clear_should_allocate_from_space(); // if set 870 } 871 } else { 872 #ifdef ASSERT 873 // It is possible that incremental_collection_failed() == true 874 // here, because an attempted scavenge did not succeed. The policy 875 // is normally expected to cause a full collection which should 876 // clear that condition, so we should not be here twice in a row 877 // with incremental_collection_failed() == true without having done 878 // a full collection in between. 879 if (!seen_incremental_collection_failed && 880 gch->incremental_collection_failed()) { 881 log_trace(gc)("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed", 882 GCCause::to_string(gch->gc_cause())); 883 seen_incremental_collection_failed = true; 884 } else if (seen_incremental_collection_failed) { 885 log_trace(gc)("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed", 886 GCCause::to_string(gch->gc_cause())); 887 assert(gch->gc_cause() == GCCause::_scavenge_alot || 888 (GCCause::is_user_requested_gc(gch->gc_cause()) && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) || 889 !gch->incremental_collection_failed(), 890 "Twice in a row"); 891 seen_incremental_collection_failed = false; 892 } 893 #endif // ASSERT 894 } 895 896 if (ZapUnusedHeapArea) { 897 eden()->check_mangled_unused_area_complete(); 898 from()->check_mangled_unused_area_complete(); 899 to()->check_mangled_unused_area_complete(); 900 } 901 902 if (!CleanChunkPoolAsync) { 903 Chunk::clean_chunk_pool(); 904 } 905 906 // update the generation and space performance counters 907 update_counters(); 908 gch->counters()->update_counters(); 909 } 910 911 void DefNewGeneration::record_spaces_top() { 912 assert(ZapUnusedHeapArea, "Not mangling unused space"); 913 eden()->set_top_for_allocations(); 914 to()->set_top_for_allocations(); 915 from()->set_top_for_allocations(); 916 } 917 918 void DefNewGeneration::ref_processor_init() { 919 Generation::ref_processor_init(); 920 } 921 922 923 void DefNewGeneration::update_counters() { 924 if (UsePerfData) { 925 _eden_counters->update_all(); 926 _from_counters->update_all(); 927 _to_counters->update_all(); 928 _gen_counters->update_all(); 929 } 930 } 931 932 void DefNewGeneration::verify() { 933 eden()->verify(); 934 from()->verify(); 935 to()->verify(); 936 } 937 938 void DefNewGeneration::print_on(outputStream* st) const { 939 Generation::print_on(st); 940 st->print(" eden"); 941 eden()->print_on(st); 942 st->print(" from"); 943 from()->print_on(st); 944 st->print(" to "); 945 to()->print_on(st); 946 } 947 948 949 const char* DefNewGeneration::name() const { 950 return "def new generation"; 951 } 952 953 // Moved from inline file as they are not called inline 954 CompactibleSpace* DefNewGeneration::first_compaction_space() const { 955 return eden(); 956 } 957 958 HeapWord* DefNewGeneration::allocate(size_t word_size, bool is_tlab) { 959 // This is the slow-path allocation for the DefNewGeneration. 960 // Most allocations are fast-path in compiled code. 961 // We try to allocate from the eden. If that works, we are happy. 962 // Note that since DefNewGeneration supports lock-free allocation, we 963 // have to use it here, as well. 964 HeapWord* result = eden()->par_allocate(word_size); 965 if (result != NULL) { 966 if (_old_gen != NULL) { 967 _old_gen->sample_eden_chunk(); 968 } 969 } else { 970 // If the eden is full and the last collection bailed out, we are running 971 // out of heap space, and we try to allocate the from-space, too. 972 // allocate_from_space can't be inlined because that would introduce a 973 // circular dependency at compile time. 974 result = allocate_from_space(word_size); 975 } 976 return result; 977 } 978 979 HeapWord* DefNewGeneration::par_allocate(size_t word_size, 980 bool is_tlab) { 981 HeapWord* res = eden()->par_allocate(word_size); 982 if (_old_gen != NULL) { 983 _old_gen->sample_eden_chunk(); 984 } 985 return res; 986 } 987 988 size_t DefNewGeneration::tlab_capacity() const { 989 return eden()->capacity(); 990 } 991 992 size_t DefNewGeneration::tlab_used() const { 993 return eden()->used(); 994 } 995 996 size_t DefNewGeneration::unsafe_max_tlab_alloc() const { 997 return unsafe_max_alloc_nogc(); 998 }