/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/serial/defNewGeneration.inline.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/ageTable.inline.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/cms/parOopClosures.hpp"
#endif

//
// DefNewGeneration functions.

// Methods of protected closure types.
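
// An object in the young generation is considered alive if it lies
// outside the young generation's reserved range (a scavenge never
// reclaims old-generation objects) or if it has already been forwarded,
// i.e. copied to to-space or promoted, by the current collection.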

DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* young_gen) : _young_gen(young_gen) {
  assert(_young_gen->kind() == Generation::ParNew ||
         _young_gen->kind() == Generation::DefNew, "Expected the young generation here");
}

bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
  return (HeapWord*)p >= _young_gen->reserved().end() || p->is_forwarded();
}

DefNewGeneration::KeepAliveClosure::
KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
  _rs = GenCollectedHeap::heap()->rem_set();
}

void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }


DefNewGeneration::FastKeepAliveClosure::
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {
  _boundary = g->reserved().end();
}

void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }

DefNewGeneration::EvacuateFollowersClosure::
EvacuateFollowersClosure(GenCollectedHeap* gch,
                         ScanClosure* cur,
                         ScanClosure* older) :
  _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::EvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen, _scan_cur_or_nonheap, _scan_older);
  } while (!_gch->no_allocs_since_save_marks());
}

DefNewGeneration::FastEvacuateFollowersClosure::
FastEvacuateFollowersClosure(GenCollectedHeap* gch,
                             FastScanClosure* cur,
                             FastScanClosure* older) :
  _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
{
  assert(_gch->young_gen()->kind() == Generation::DefNew, "Generation should be DefNew");
  _young_gen = (DefNewGeneration*)_gch->young_gen();
}

void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen, _scan_cur_or_nonheap, _scan_older);
  } while (!_gch->no_allocs_since_save_marks());
  guarantee(_young_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
}

ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
    OopsInClassLoaderDataOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  _boundary = _g->reserved().end();
}

void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }

FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
    OopsInClassLoaderDataOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  _boundary = _g->reserved().end();
}

void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }

void CLDScanClosure::do_cld(ClassLoaderData* cld) {
  NOT_PRODUCT(ResourceMark rm);
  log_develop_trace(gc, scavenge)("CLDScanClosure::do_cld " PTR_FORMAT ", %s, dirty: %s",
                                  p2i(cld),
                                  cld->loader_name(),
                                  cld->has_modified_oops() ? "true" : "false");

  // If the cld has not been dirtied we know that there are
  // no references into the young gen and we can skip it.
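  // Note: the modified-oops flag plays the same role for oops reachable
  // through class metadata that dirty cards play for oops stored in the
  // heap: it is a remembered-set entry for the whole CLD.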
  if (cld->has_modified_oops()) {
    if (_accumulate_modified_oops) {
      cld->accumulate_modified_oops();
    }

    // Tell the closure which CLD is being scanned so that it can be dirtied
    // if oops are left pointing into the young gen.
    _scavenge_closure->set_scanned_cld(cld);

    // Clean the cld since we're going to scavenge all the metadata.
    cld->oops_do(_scavenge_closure, false, /*clear_modified_oops*/true);

    _scavenge_closure->set_scanned_cld(NULL);
  }
}

ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
  _g(g)
{
  _boundary = _g->reserved().end();
}

void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }

DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   const char* policy)
  : Generation(rs, initial_size),
    _preserved_marks_set(false /* in_c_heap */),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  gch->rem_set()->resize_covered_region(cmr);

  _eden_space = new ContiguousSpace();
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
    vm_exit_during_initialization("Could not allocate a new gen space");
  }

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx alignment = gch->collector_policy()->space_alignment();
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, alignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // Allocate the performance counters.
  GenCollectorPolicy* gcp = gch->gen_policy();

  // Generation counters -- generation 0, 3 subspaces.
  _gen_counters = new GenerationCounters("new", 0, 3,
      gcp->min_young_size(), gcp->max_young_size(), &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _old_gen = NULL;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;

  _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
}

void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  uintx alignment =
    GenCollectedHeap::heap()->collector_policy()->space_alignment();

  // If the spaces are being cleared (only done at heap initialization
  // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces,
  // so check that they are empty.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
         "Initialization of the survivor spaces assumes these are empty");

  // Compute sizes.
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, alignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding; if so, adjust eden size back up.
    minimum_eden_size = align_up(minimum_eden_size, alignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_down(maximum_survivor_size, alignment);
    survivor_size = MAX2(unaligned_survivor_size, alignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned(eden_start), "checking alignment");
  assert(Space::is_aligned(from_start), "checking alignment");
  assert(Space::is_aligned(to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);

  // A minimum eden size implies that there is a part of eden that
  // is being used and that affects the initialization of any
  // newly formed eden.
  bool live_in_eden = minimum_eden_size > 0;

  // If not clearing the spaces, do some checking to verify that
  // the spaces are already mangled.
  if (!clear_space) {
    // Must check mangling before the spaces are reshaped. Otherwise,
    // the bottom or end of one space may have moved into another;
    // a failure of the check may then not correctly indicate which
    // space is not properly mangled.
    if (ZapUnusedHeapArea) {
      HeapWord* limit = (HeapWord*) _virtual_space.high();
      eden()->check_mangled_unused_area(limit);
      from()->check_mangled_unused_area(limit);
      to()->check_mangled_unused_area(limit);
    }
  }

  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
  // If clear_space and live_in_eden, we will not have cleared any
  // portion of eden above its top. This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, clear_space, mangle_space);
  to()->initialize(toMR, clear_space, mangle_space);

  // Set next compaction spaces.
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered. The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space       = to();
  _to_space         = s;
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered. The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}

bool DefNewGeneration::expand(size_t bytes) {
  MutexLocker x(ExpandHeap_lock);
  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
  bool success = _virtual_space.expand_by(bytes);
  if (success && ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    HeapWord* new_high = (HeapWord*) _virtual_space.high();
    MemRegion mangle_region(prev_high, new_high);
    SpaceMangler::mangle_region(mangle_region);
  }

  // Do not attempt an expand-to-the-reserve size. The
  // request should properly observe the maximum size of
  // the generation, so an expand-to-reserve should be
  // unnecessary. Also, a second expand-to-reserve call
  // could cause an undue expansion: for example, if the
  // first expand fails for unknown reasons but the second
  // succeeds and expands the heap to its maximum value.
  if (GCLocker::is_active()) {
    log_debug(gc)("Garbage collection disabled, expanded heap instead");
  }

  return success;
}

size_t DefNewGeneration::adjust_for_thread_increase(size_t new_size_candidate,
                                                    size_t new_size_before,
                                                    size_t alignment) const {
  size_t desired_new_size = new_size_before;

  if (NewSizeThreadIncrease > 0) {
    int threads_count;
    size_t thread_increase_size = 0;

    // 1. Check for overflow at 'threads_count * NewSizeThreadIncrease'.
    threads_count = Threads::number_of_non_daemon_threads();
    if (threads_count > 0 && NewSizeThreadIncrease <= max_uintx / threads_count) {
      thread_increase_size = threads_count * NewSizeThreadIncrease;

      // 2. Check for overflow at 'new_size_candidate + thread_increase_size'.
      if (new_size_candidate <= max_uintx - thread_increase_size) {
        new_size_candidate += thread_increase_size;

        // 3. Check for overflow at 'align_up'.
        size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1));
        if (new_size_candidate <= aligned_max) {
          desired_new_size = align_up(new_size_candidate, alignment);
        }
      }
    }
  }

  return desired_new_size;
}

void DefNewGeneration::compute_new_size() {
  // This is called after a GC that includes the old generation, so from-space
  // will normally be empty.
  // Note that we check both spaces, since if scavenge failed they revert roles.
  // If they are not both empty, we bail out (otherwise we would have to
  // relocate the objects).
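  // The desired size is derived from the old generation's capacity via
  // NewRatio, plus NewSizeThreadIncrease bytes per non-daemon thread.
  // For example (hypothetical values): old_size = 256M and NewRatio = 2
  // give a 128M candidate, and two non-daemon threads at
  // NewSizeThreadIncrease = 16K add another 32K before alignment.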
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  size_t old_size = gch->old_gen()->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = initial_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  int threads_count = 0;
  size_t thread_increase_size = 0;

  size_t new_size_candidate = old_size / NewRatio;
  // Compute the desired new generation size based on NewRatio and
  // NewSizeThreadIncrease, reverting to the previous value if any
  // overflow happens.
  size_t desired_new_size = adjust_for_thread_increase(new_size_candidate, new_size_before, alignment);

  // Adjust new generation size.
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
      changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false. If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // Bail out of shrinking if there are objects in eden.
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
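    // Passing eden()->used() as the minimum eden size ensures the
    // recomputed eden still covers any objects currently allocated in it.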
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    gch->rem_set()->resize_covered_region(cmr);

    log_debug(gc, ergo, heap)(
        "New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden=" SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, _virtual_space.committed_size()/K,
        eden()->capacity()/K, from()->capacity()/K);
    log_trace(gc, ergo, heap)(
        " [allowed " SIZE_FORMAT "K extra for %d threads]",
        thread_increase_size/K, threads_count);
  }
}

void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl, uint n_threads) {
  assert(false, "NYI -- are you sure you want to call this?");
}


size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}


size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used();      // to() is only used during scavenge
}


size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}

size_t DefNewGeneration::max_capacity() const {
  const size_t alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
  const size_t reserved_bytes = reserved().byte_size();
  return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
}

size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}

size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}

size_t DefNewGeneration::contiguous_available() const {
  return eden()->free();
}


HeapWord* volatile* DefNewGeneration::top_addr() const { return eden()->top_addr(); }
HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }

void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}


void DefNewGeneration::space_iterate(SpaceClosure* blk,
                                     bool usedOnly) {
  blk->do_space(eden());
  blk->do_space(from());
  blk->do_space(to());
}

// The last collection bailed out, we are running out of heap space,
// so we try to allocate from the from-space, too.
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
  bool should_try_alloc = should_allocate_from_space() || GCLocker::is_active_and_needs_gc();

  // If the Heap_lock is not locked by this thread, this will be called
  // again later with the Heap_lock held.
  bool do_alloc = should_try_alloc && (Heap_lock->owned_by_self() || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()));

  HeapWord* result = NULL;
  if (do_alloc) {
    result = from()->allocate(size);
  }

  log_trace(gc, alloc)("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "): will_fail: %s heap_lock: %s free: " SIZE_FORMAT "%s%s returns %s",
                       size,
                       GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
                         "true" : "false",
                       Heap_lock->is_locked() ? "locked" : "unlocked",
                       from()->free(),
                       should_try_alloc ? "" : " should_allocate_from_space: NOT",
                       do_alloc ? " Heap_lock is not owned by self" : "",
                       result == NULL ? "NULL" : "object");

  return result;
}

HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
                                                bool   is_tlab,
                                                bool   parallel) {
  // We don't attempt to expand the young generation (but perhaps we should.)
  return allocate(size, is_tlab);
}

void DefNewGeneration::adjust_desired_tenuring_threshold() {
  // Set the desired survivor size to half the real survivor space.
  size_t const survivor_capacity = to()->capacity() / HeapWordSize;
  size_t const desired_survivor_size = (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);

  _tenuring_threshold = age_table()->compute_tenuring_threshold(desired_survivor_size);

  if (UsePerfData) {
    GCPolicyCounters* gc_counters = GenCollectedHeap::heap()->counters();
    gc_counters->tenuring_threshold()->set_value(_tenuring_threshold);
    gc_counters->desired_survivor_size()->set_value(desired_survivor_size * oopSize);
  }

  age_table()->print_age_table(_tenuring_threshold);
}

void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start();
  DefNewTracer gc_tracer;
  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());

  _old_gen = gch->old_gen();

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    log_trace(gc)(":: Collection attempt not safe ::");
    gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  GCTraceTime(Trace, gc, phases) tm("DefNew", NULL, gch->gc_cause());

  gch->trace_heap_before_gc(&gc_tracer);

  // These can be shared for all code paths.
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);
  // The preserved marks should be empty at the start of the GC.
  _preserved_marks_set.init(1);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(),
         "save marks have not been newly set.");

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  CLDScanClosure cld_scan_closure(&fsc_with_no_gc_barrier,
                                  gch->rem_set()->cld_rem_set()->accumulate_modified_oops());

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(),
         "save marks have not been newly set.");

  {
    // DefNew needs to run with n_threads == 0, to make sure the serial
    // version of the card table scanning code is used.
    // See: CardTableRS::non_clean_card_iterate_possibly_parallel.
    StrongRootsScope srs(0);

    gch->young_process_roots(&srs,
                             &fsc_with_no_gc_barrier,
                             &fsc_with_gc_barrier,
                             &cld_scan_closure);
  }

  // "Evacuate followers": recursively scan the objects copied so far,
  // copying everything they reference, until a pass over the regions
  // between the saved marks and top() copies no further objects.
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  ReferenceProcessorPhaseTimes pt(_gc_timer, rp->num_q());
  const ReferenceProcessorStats& stats =
    rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                      NULL, &pt);
  gc_tracer.report_gc_reference_stats(stats);
  gc_tracer.report_tenuring_threshold(tenuring_threshold());
  pt.print_all_references();

  assert(gch->no_allocs_since_save_marks(), "save marks have not been newly set.");

  WeakProcessor::weak_oops_do(&is_alive, &keep_alive);

  // Verify that the usage of keep_alive didn't copy any objects.
  assert(gch->no_allocs_since_save_marks(), "save marks have not been newly set.");

  if (!_promotion_failed) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s). When a young collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation. Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    AdaptiveSizePolicy* size_policy = gch->size_policy();
    size_policy->reset_gc_overhead_limit_count();
    assert(!gch->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    log_info(gc, promotion)("Promotion failed");
    // Add to-space to the list of spaces to compact
    // when a promotion failure has occurred. In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_failed();

    // Inform the next generation that a promotion failure occurred.
    _old_gen->promotion_failure_occurred();
    gc_tracer.report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(gch->reset_promotion_should_fail();)
  }
  // We should have processed and cleared all the preserved marks.
  _preserved_marks_set.reclaim();
  // Set the new iteration-safe limit for the survivor spaces.
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());

  // We need to use a monotonically non-decreasing time in ms,
  // or we will see time-warp warnings; os::javaTimeMillis()
  // does not guarantee monotonicity.
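  // os::javaTimeNanos() is based on a monotonic clock, so derive the
  // millisecond value from it instead.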
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  gch->trace_heap_after_gc(&gc_tracer);

  _gc_timer->register_gc_end();

  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  _promotion_failed_info.reset();
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::remove_forwarding_pointers() {
  RemoveForwardedPointerClosure rspc;
  eden()->object_iterate(&rspc);
  from()->object_iterate(&rspc);
  restore_preserved_marks();
}

void DefNewGeneration::restore_preserved_marks() {
  SharedRestorePreservedMarksTaskExecutor task_executor(NULL);
  _preserved_marks_set.restore(&task_executor);
}

void DefNewGeneration::handle_promotion_failure(oop old) {
  log_debug(gc, promotion)("Promotion failure size = %d", old->size());

  _promotion_failed = true;
  _promotion_failed_info.register_copy_failure(old->size());
  _preserved_marks_set.get()->push_if_necessary(old, old->mark_raw());
  // Forward to self.
  old->forward_to(old);

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // Prevent recursion in copy_to_survivor_space().
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}

oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old).
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate_aligned(s);
  }

  // Otherwise try allocating obj tenured.
  if (obj == NULL) {
    obj = _old_gen->promote(old, s);
    if (obj == NULL) {
      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj.
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj.
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj is still in the new generation.
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done; insert forward pointer to obj in this header.
  old->forward_to(obj);

  return obj;
}

void DefNewGeneration::drain_promo_failure_scan_stack() {
  while (!_promo_failure_scan_stack.is_empty()) {
    oop obj = _promo_failure_scan_stack.pop();
    obj->oop_iterate(_promo_failure_scan_stack_closure);
  }
}

void DefNewGeneration::save_marks() {
  eden()->set_saved_mark();
  to()->set_saved_mark();
  from()->set_saved_mark();
}


void DefNewGeneration::reset_saved_marks() {
  eden()->reset_saved_mark();
  to()->reset_saved_mark();
  from()->reset_saved_mark();
}


bool DefNewGeneration::no_allocs_since_save_marks() {
  assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
  assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
  return to()->saved_mark_at_top();
}

#define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
                                                                \
void DefNewGeneration::                                         \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
  cl->set_generation(this);                                     \
  eden()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  to()->oop_since_save_marks_iterate##nv_suffix(cl);            \
  from()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  cl->reset_generation();                                       \
  save_marks();                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)

#undef DefNew_SINCE_SAVE_MARKS_DEFN

void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                          size_t max_alloc_words) {
  if (requestor == this || _promotion_failed) {
    return;
  }
  assert(GenCollectedHeap::heap()->is_old_gen(requestor), "We should not call our own generation");

  /* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate.
  if (to_space->top() > to_space->bottom()) {
    trace("to_space not empty when contribute_scratch called");
  }
  */

  ContiguousSpace* to_space = to();
  assert(to_space->end() >= to_space->top(), "pointers out of order");
  size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    ScratchBlock* sb = (ScratchBlock*)to_space->top();
    sb->num_words = free_words;
    sb->next = list;
    list = sb;
  }
}

void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of
  // to_space if ZapUnusedHeapArea. This is needed because
  // top is not maintained while using to-space as scratch.
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area_complete();
  }
}

bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    log_trace(gc)(":: to is not empty ::");
    return false;
  }
  if (_old_gen == NULL) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    _old_gen = gch->old_gen();
  }
  return _old_gen->promotion_attempt_is_safe(used());
}

void DefNewGeneration::gc_epilogue(bool full) {
  DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)

  assert(!GCLocker::is_active(), "We should not be executing here");
  // Check if the heap is approaching full after a collection has
  // been done. Generally the young generation is empty at
  // a minimum at the end of a collection. If it is not, then
  // the heap is approaching full.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (full) {
    DEBUG_ONLY(seen_incremental_collection_failed = false;)
    if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
      log_trace(gc)("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
                    GCCause::to_string(gch->gc_cause()));
      gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
      set_should_allocate_from_space(); // we seem to be running out of space
    } else {
      log_trace(gc)("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
                    GCCause::to_string(gch->gc_cause()));
      gch->clear_incremental_collection_failed(); // We just did a full collection
      clear_should_allocate_from_space(); // if set
    }
  } else {
#ifdef ASSERT
    // It is possible that incremental_collection_failed() == true
    // here, because an attempted scavenge did not succeed. The policy
    // is normally expected to cause a full collection which should
    // clear that condition, so we should not be here twice in a row
    // with incremental_collection_failed() == true without having done
    // a full collection in between.
    if (!seen_incremental_collection_failed &&
        gch->incremental_collection_failed()) {
      log_trace(gc)("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
                    GCCause::to_string(gch->gc_cause()));
      seen_incremental_collection_failed = true;
    } else if (seen_incremental_collection_failed) {
      log_trace(gc)("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
                    GCCause::to_string(gch->gc_cause()));
      assert(gch->gc_cause() == GCCause::_scavenge_alot ||
             (GCCause::is_user_requested_gc(gch->gc_cause()) && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
             !gch->incremental_collection_failed(),
             "Twice in a row");
      seen_incremental_collection_failed = false;
    }
#endif // ASSERT
  }

  if (ZapUnusedHeapArea) {
    eden()->check_mangled_unused_area_complete();
    from()->check_mangled_unused_area_complete();
    to()->check_mangled_unused_area_complete();
  }

  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }

  // Update the generation and space performance counters.
  update_counters();
  gch->counters()->update_counters();
}

void DefNewGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  eden()->set_top_for_allocations();
  to()->set_top_for_allocations();
  from()->set_top_for_allocations();
}

void DefNewGeneration::ref_processor_init() {
  Generation::ref_processor_init();
}


void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}

void DefNewGeneration::verify() {
  eden()->verify();
  from()->verify();
  to()->verify();
}

void DefNewGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print(" eden");
  eden()->print_on(st);
  st->print(" from");
  from()->print_on(st);
  st->print(" to ");
  to()->print_on(st);
}


const char* DefNewGeneration::name() const {
  return "def new generation";
}

// Moved from inline file as they are not called inline.
CompactibleSpace* DefNewGeneration::first_compaction_space() const {
  return eden();
}

HeapWord* DefNewGeneration::allocate(size_t word_size, bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden. If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result != NULL) {
#if INCLUDE_ALL_GCS
    if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
      _old_gen->sample_eden_chunk();
    }
#endif
  } else {
    // If the eden is full and the last collection bailed out, we are running
    // out of heap space, and we try to allocate from the from-space, too.
    // allocate_from_space can't be inlined because that would introduce a
    // circular dependency at compile time.
    result = allocate_from_space(word_size);
  }
  return result;
}

HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                         bool is_tlab) {
  HeapWord* res = eden()->par_allocate(word_size);
#if INCLUDE_ALL_GCS
  if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
    _old_gen->sample_eden_chunk();
  }
#endif
  return res;
}

size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}

size_t DefNewGeneration::tlab_used() const {
  return eden()->used();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}