/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/serial/defNewGeneration.inline.hpp"
#include "gc/shared/ageTable.inline.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/cms/parOopClosures.hpp"
#endif

//
// DefNewGeneration functions.

// Methods of protected closure types.

DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* young_gen) : _young_gen(young_gen) {
  assert(_young_gen->kind() == Generation::ParNew ||
         _young_gen->kind() == Generation::DefNew, "Expected the young generation here");
}

bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
  return (HeapWord*)p >= _young_gen->reserved().end() || p->is_forwarded();
}

DefNewGeneration::KeepAliveClosure::
KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
  _rs = GenCollectedHeap::heap()->rem_set();
}

void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }


DefNewGeneration::FastKeepAliveClosure::
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {
  _boundary = g->reserved().end();
}

void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }

DefNewGeneration::EvacuateFollowersClosure::
EvacuateFollowersClosure(GenCollectedHeap* gch,
                         ScanClosure* cur,
                         ScanClosure* older) :
  _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::EvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen, _scan_cur_or_nonheap, _scan_older);
  } while (!_gch->no_allocs_since_save_marks());
}

DefNewGeneration::FastEvacuateFollowersClosure::
FastEvacuateFollowersClosure(GenCollectedHeap* gch,
                             FastScanClosure* cur,
                             FastScanClosure* older) :
  _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
{
  assert(_gch->young_gen()->kind() == Generation::DefNew, "Generation should be DefNew");
  _young_gen = (DefNewGeneration*)_gch->young_gen();
}

void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen, _scan_cur_or_nonheap, _scan_older);
  } while (!_gch->no_allocs_since_save_marks());
  guarantee(_young_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
}

ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
    OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  _boundary = _g->reserved().end();
}

void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }

FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
    OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  _boundary = _g->reserved().end();
}

void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }

void KlassScanClosure::do_klass(Klass* klass) {
  NOT_PRODUCT(ResourceMark rm);
  log_develop_trace(gc, scavenge)("KlassScanClosure::do_klass " PTR_FORMAT ", %s, dirty: %s",
                                  p2i(klass),
                                  klass->external_name(),
                                  klass->has_modified_oops() ? "true" : "false");

  // If the klass has not been dirtied we know that there are
  // no references into the young gen and we can skip it.
  if (klass->has_modified_oops()) {
    if (_accumulate_modified_oops) {
      klass->accumulate_modified_oops();
    }

    // Clear this state since we're going to scavenge all the metadata.
    klass->clear_modified_oops();

    // Tell the closure which Klass is being scanned so that it can be dirtied
    // if oops are left pointing into the young gen.
    _scavenge_closure->set_scanned_klass(klass);

    klass->oops_do(_scavenge_closure);

    _scavenge_closure->set_scanned_klass(NULL);
  }
}

ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
  _g(g)
{
  _boundary = _g->reserved().end();
}

void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }

void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }

KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
                                   KlassRemSet* klass_rem_set)
    : _scavenge_closure(scavenge_closure),
      _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}


DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   const char* policy)
  : Generation(rs, initial_size),
    _preserved_marks_set(false /* in_c_heap */),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  gch->barrier_set()->resize_covered_region(cmr);

  _eden_space = new ContiguousSpace();
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
    vm_exit_during_initialization("Could not allocate a new gen space");
  }

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx alignment = gch->collector_policy()->space_alignment();
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, alignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters
  GenCollectorPolicy* gcp = gch->gen_policy();

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3,
      gcp->min_young_size(), gcp->max_young_size(), &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _old_gen = NULL;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;

  _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
}

void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  uintx alignment =
    GenCollectedHeap::heap()->collector_policy()->space_alignment();

  // If the spaces are being cleared (only done at heap initialization
  // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces
  // so check.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
         "Initialization of the survivor spaces assumes these are empty");

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, alignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding, if so adjust eden size back up
    minimum_eden_size = align_up(minimum_eden_size, alignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_down(maximum_survivor_size, alignment);
    survivor_size = MAX2(unaligned_survivor_size, alignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned(eden_start), "checking alignment");
  assert(Space::is_aligned(from_start), "checking alignment");
  assert(Space::is_aligned(to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);

  // A minimum eden size implies that there is a part of eden that
  // is being used and that affects the initialization of any
  // newly formed eden.
  bool live_in_eden = minimum_eden_size > 0;

  // If not clearing the spaces, do some checking to verify that
  // the spaces are already mangled.
  if (!clear_space) {
    // Must check mangling before the spaces are reshaped. Otherwise,
    // the bottom or end of one space may have moved into another space
    // and a failure of the check may not correctly indicate which space
    // is not properly mangled.
    if (ZapUnusedHeapArea) {
      HeapWord* limit = (HeapWord*) _virtual_space.high();
      eden()->check_mangled_unused_area(limit);
      from()->check_mangled_unused_area(limit);
      to()->check_mangled_unused_area(limit);
    }
  }

  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
  // If clear_space and live_in_eden, we will not have cleared any
  // portion of eden above its top. This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, clear_space, mangle_space);
  to()->initialize(toMR, clear_space, mangle_space);

  // Set next compaction spaces.
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered. The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space        = to();
  _to_space          = s;
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered. The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}

bool DefNewGeneration::expand(size_t bytes) {
  MutexLocker x(ExpandHeap_lock);
  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
  bool success = _virtual_space.expand_by(bytes);
  if (success && ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    HeapWord* new_high = (HeapWord*) _virtual_space.high();
    MemRegion mangle_region(prev_high, new_high);
    SpaceMangler::mangle_region(mangle_region);
  }

  // Do not attempt an expand-to-the-reserve size. The
  // request should properly observe the maximum size of
  // the generation so an expand-to-reserve should be
  // unnecessary. Also a second call to expand-to-reserve
  // value potentially can cause an undue expansion.
  // For example if the first expand fails for unknown reasons,
  // but the second succeeds and expands the heap to its maximum
  // value.
  if (GCLocker::is_active()) {
    log_debug(gc)("Garbage collection disabled, expanded heap instead");
  }

  return success;
}

size_t DefNewGeneration::adjust_for_thread_increase(size_t new_size_candidate,
                                                    size_t new_size_before,
                                                    size_t alignment) const {
  size_t desired_new_size = new_size_before;

  if (NewSizeThreadIncrease > 0) {
    int threads_count;
    size_t thread_increase_size = 0;

    // 1. Check an overflow at 'threads_count * NewSizeThreadIncrease'.
    threads_count = Threads::number_of_non_daemon_threads();
    if (threads_count > 0 && NewSizeThreadIncrease <= max_uintx / threads_count) {
      thread_increase_size = threads_count * NewSizeThreadIncrease;

      // 2. Check an overflow at 'new_size_candidate + thread_increase_size'.
      if (new_size_candidate <= max_uintx - thread_increase_size) {
        new_size_candidate += thread_increase_size;

        // 3. Check an overflow at 'align_up'.
        size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1));
        if (new_size_candidate <= aligned_max) {
          desired_new_size = align_up(new_size_candidate, alignment);
        }
      }
    }
  }

  return desired_new_size;
}

void DefNewGeneration::compute_new_size() {
  // This is called after a GC that includes the old generation, so from-space
  // will normally be empty.
  // Note that we check both spaces, since if scavenge failed they revert roles.
  // If not we bail out (otherwise we would have to relocate the objects).
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  size_t old_size = gch->old_gen()->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = initial_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  int threads_count = 0;
  size_t thread_increase_size = 0;

  size_t new_size_candidate = old_size / NewRatio;
  // Compute the desired new generation size based on NewRatio and
  // NewSizeThreadIncrease, and revert to the previous value if any overflow happens.
  size_t desired_new_size = adjust_for_thread_increase(new_size_candidate, new_size_before, alignment);

  // Adjust new generation size
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
      changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false. If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // bail out of shrinking if objects in eden
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    gch->barrier_set()->resize_covered_region(cmr);

    log_debug(gc, ergo, heap)(
        "New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden=" SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, _virtual_space.committed_size()/K,
        eden()->capacity()/K, from()->capacity()/K);
    log_trace(gc, ergo, heap)(
        " [allowed " SIZE_FORMAT "K extra for %d threads]",
        thread_increase_size/K, threads_count);
  }
}

void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl, uint n_threads) {
  assert(false, "NYI -- are you sure you want to call this?");
}


size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}


size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used();      // to() is only used during scavenge
}


size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}

size_t DefNewGeneration::max_capacity() const {
  const size_t alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
  const size_t reserved_bytes = reserved().byte_size();
  return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
}

size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}

size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}

size_t DefNewGeneration::contiguous_available() const {
  return eden()->free();
}


HeapWord* volatile* DefNewGeneration::top_addr() const { return eden()->top_addr(); }
HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }

void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}


void DefNewGeneration::space_iterate(SpaceClosure* blk,
                                     bool usedOnly) {
  blk->do_space(eden());
  blk->do_space(from());
  blk->do_space(to());
}

// The last collection bailed out, we are running out of heap space,
// so we try to allocate the from-space, too.
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
  bool should_try_alloc = should_allocate_from_space() || GCLocker::is_active_and_needs_gc();

  // If the Heap_lock is not locked by this thread, this will be called
  // again later with the Heap_lock held.
  bool do_alloc = should_try_alloc && (Heap_lock->owned_by_self() || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()));

  HeapWord* result = NULL;
  if (do_alloc) {
    result = from()->allocate(size);
  }

  log_trace(gc, alloc)("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "): will_fail: %s heap_lock: %s free: " SIZE_FORMAT "%s%s returns %s",
                       size,
                       GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
                         "true" : "false",
                       Heap_lock->is_locked() ? "locked" : "unlocked",
                       from()->free(),
                       should_try_alloc ? "" : " should_allocate_from_space: NOT",
                       do_alloc ? " Heap_lock is not owned by self" : "",
                       result == NULL ? "NULL" : "object");

  return result;
}

HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
                                                bool is_tlab,
                                                bool parallel) {
  // We don't attempt to expand the young generation (but perhaps we should.)
  return allocate(size, is_tlab);
}

void DefNewGeneration::adjust_desired_tenuring_threshold() {
  // Set the desired survivor size to half the real survivor space
  size_t const survivor_capacity = to()->capacity() / HeapWordSize;
  size_t const desired_survivor_size = (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);

  _tenuring_threshold = age_table()->compute_tenuring_threshold(desired_survivor_size);

  if (UsePerfData) {
    GCPolicyCounters* gc_counters = GenCollectedHeap::heap()->gen_policy()->counters();
    gc_counters->tenuring_threshold()->set_value(_tenuring_threshold);
    gc_counters->desired_survivor_size()->set_value(desired_survivor_size * oopSize);
  }

  age_table()->print_age_table(_tenuring_threshold);
}

void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start();
  DefNewTracer gc_tracer;
  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());

  _old_gen = gch->old_gen();

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    log_trace(gc)(":: Collection attempt not safe ::");
    gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  GCTraceTime(Trace, gc, phases) tm("DefNew", NULL, gch->gc_cause());

  gch->trace_heap_before_gc(&gc_tracer);

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);
  // The preserved marks should be empty at the start of the GC.
  _preserved_marks_set.init(1);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(),
         "save marks have not been newly set.");

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
                                      gch->rem_set()->klass_rem_set());
  CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
                                           &fsc_with_no_gc_barrier,
                                           false);

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(),
         "save marks have not been newly set.");

  {
    // DefNew needs to run with n_threads == 0, to make sure the serial
    // version of the card table scanning code is used.
    // See: CardTableModRefBSForCTRS::non_clean_card_iterate_possibly_parallel.
    StrongRootsScope srs(0);

    gch->young_process_roots(&srs,
                             &fsc_with_no_gc_barrier,
                             &fsc_with_gc_barrier,
                             &cld_scan_closure);
  }

  // "evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  const ReferenceProcessorStats& stats =
    rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                      NULL, _gc_timer);
  gc_tracer.report_gc_reference_stats(stats);
  gc_tracer.report_tenuring_threshold(tenuring_threshold());

  if (!_promotion_failed) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s). When a young collection fails to collect
      // sufficient space resizing of the young generation can occur
      // and redistribute the spaces in the young generation. Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
    size_policy->reset_gc_overhead_limit_count();
    assert(!gch->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    log_info(gc, promotion)("Promotion failed");
    // Add to-space to the list of space to compact
    // when a promotion failure has occurred. In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_failed();

    // Inform the next generation that a promotion failure occurred.
    _old_gen->promotion_failure_occurred();
    gc_tracer.report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(gch->reset_promotion_should_fail();)
  }
  // We should have processed and cleared all the preserved marks.
  _preserved_marks_set.reclaim();
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());

  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings and os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  gch->trace_heap_after_gc(&gc_tracer);

  _gc_timer->register_gc_end();

  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  _promotion_failed_info.reset();
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::remove_forwarding_pointers() {
  RemoveForwardedPointerClosure rspc;
  eden()->object_iterate(&rspc);
  from()->object_iterate(&rspc);

  SharedRestorePreservedMarksTaskExecutor task_executor(GenCollectedHeap::heap()->workers());
  _preserved_marks_set.restore(&task_executor);
}

void DefNewGeneration::handle_promotion_failure(oop old) {
  log_debug(gc, promotion)("Promotion failure size = %d) ", old->size());

  _promotion_failed = true;
  _promotion_failed_info.register_copy_failure(old->size());
  _preserved_marks_set.get()->push_if_necessary(old, old->mark());
  // forward to self
  old->forward_to(old);

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}

oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate_aligned(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _old_gen->promote(old, s);
    if (obj == NULL) {
      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  return obj;
}

void DefNewGeneration::drain_promo_failure_scan_stack() {
  while (!_promo_failure_scan_stack.is_empty()) {
    oop obj = _promo_failure_scan_stack.pop();
    obj->oop_iterate(_promo_failure_scan_stack_closure);
  }
}

void DefNewGeneration::save_marks() {
  eden()->set_saved_mark();
  to()->set_saved_mark();
  from()->set_saved_mark();
}


void DefNewGeneration::reset_saved_marks() {
  eden()->reset_saved_mark();
  to()->reset_saved_mark();
  from()->reset_saved_mark();
}


bool DefNewGeneration::no_allocs_since_save_marks() {
  assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
  assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
  return to()->saved_mark_at_top();
}

#define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)   \
                                                                  \
void DefNewGeneration::                                           \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {     \
  cl->set_generation(this);                                       \
  eden()->oop_since_save_marks_iterate##nv_suffix(cl);            \
  to()->oop_since_save_marks_iterate##nv_suffix(cl);              \
  from()->oop_since_save_marks_iterate##nv_suffix(cl);            \
  cl->reset_generation();                                         \
  save_marks();                                                   \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)

#undef DefNew_SINCE_SAVE_MARKS_DEFN

void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                          size_t max_alloc_words) {
  if (requestor == this || _promotion_failed) {
    return;
  }
  assert(GenCollectedHeap::heap()->is_old_gen(requestor), "We should not call our own generation");

  /* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate.
  if (to_space->top() > to_space->bottom()) {
    trace("to_space not empty when contribute_scratch called");
  }
  */

  ContiguousSpace* to_space = to();
  assert(to_space->end() >= to_space->top(), "pointers out of order");
  size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    ScratchBlock* sb = (ScratchBlock*)to_space->top();
    sb->num_words = free_words;
    sb->next = list;
    list = sb;
  }
}

void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of
  // to_space if ZapUnusedHeapArea. This is needed because
  // top is not maintained while using to-space as scratch.
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area_complete();
  }
}

bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    log_trace(gc)(":: to is not empty ::");
    return false;
  }
  if (_old_gen == NULL) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    _old_gen = gch->old_gen();
  }
  return _old_gen->promotion_attempt_is_safe(used());
}

void DefNewGeneration::gc_epilogue(bool full) {
  DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)

  assert(!GCLocker::is_active(), "We should not be executing here");
  // Check if the heap is approaching full after a collection has
  // been done. Generally the young generation is empty at
  // a minimum at the end of a collection. If it is not, then
  // the heap is approaching full.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (full) {
    DEBUG_ONLY(seen_incremental_collection_failed = false;)
    if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
      log_trace(gc)("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
                    GCCause::to_string(gch->gc_cause()));
      gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
      set_should_allocate_from_space(); // we seem to be running out of space
    } else {
      log_trace(gc)("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
                    GCCause::to_string(gch->gc_cause()));
      gch->clear_incremental_collection_failed(); // We just did a full collection
      clear_should_allocate_from_space(); // if set
    }
  } else {
#ifdef ASSERT
    // It is possible that incremental_collection_failed() == true
    // here, because an attempted scavenge did not succeed. The policy
    // is normally expected to cause a full collection which should
    // clear that condition, so we should not be here twice in a row
    // with incremental_collection_failed() == true without having done
    // a full collection in between.
    if (!seen_incremental_collection_failed &&
        gch->incremental_collection_failed()) {
      log_trace(gc)("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
                    GCCause::to_string(gch->gc_cause()));
      seen_incremental_collection_failed = true;
    } else if (seen_incremental_collection_failed) {
      log_trace(gc)("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
                    GCCause::to_string(gch->gc_cause()));
      assert(gch->gc_cause() == GCCause::_scavenge_alot ||
             (GCCause::is_user_requested_gc(gch->gc_cause()) && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
             !gch->incremental_collection_failed(),
             "Twice in a row");
      seen_incremental_collection_failed = false;
    }
#endif // ASSERT
  }

  if (ZapUnusedHeapArea) {
    eden()->check_mangled_unused_area_complete();
    from()->check_mangled_unused_area_complete();
    to()->check_mangled_unused_area_complete();
  }

  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }

  // update the generation and space performance counters
  update_counters();
  gch->gen_policy()->counters()->update_counters();
}

void DefNewGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  eden()->set_top_for_allocations();
  to()->set_top_for_allocations();
  from()->set_top_for_allocations();
}

void DefNewGeneration::ref_processor_init() {
  Generation::ref_processor_init();
}


void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}

void DefNewGeneration::verify() {
  eden()->verify();
  from()->verify();
  to()->verify();
}

void DefNewGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print(" eden");
  eden()->print_on(st);
  st->print(" from");
  from()->print_on(st);
  st->print(" to ");
  to()->print_on(st);
}


const char* DefNewGeneration::name() const {
  return "def new generation";
}

// Moved from inline file as they are not called inline
CompactibleSpace* DefNewGeneration::first_compaction_space() const {
  return eden();
}

HeapWord* DefNewGeneration::allocate(size_t word_size, bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden. If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result != NULL) {
    if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
      _old_gen->sample_eden_chunk();
    }
  } else {
    // If the eden is full and the last collection bailed out, we are running
    // out of heap space, and we try to allocate the from-space, too.
    // allocate_from_space can't be inlined because that would introduce a
    // circular dependency at compile time.
    result = allocate_from_space(word_size);
  }
  return result;
}

HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                         bool is_tlab) {
  HeapWord* res = eden()->par_allocate(word_size);
  if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
    _old_gen->sample_eden_chunk();
  }
  return res;
}

size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}

size_t DefNewGeneration::tlab_used() const {
  return eden()->used();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}