/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/defNewGeneration.inline.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/genRemSet.hpp"
#include "memory/generationSpec.hpp"
#include "memory/iterator.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/space.inline.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

//
// DefNewGeneration functions.

// Methods of protected closure types.

DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
  assert(g->level() == 0, "Optimized for youngest gen.");
}
bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
  return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
}

DefNewGeneration::KeepAliveClosure::
KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
  GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
  _rs = (CardTableRS*)rs;
}

void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }


DefNewGeneration::FastKeepAliveClosure::
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {
  _boundary = g->reserved().end();
}

void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }

DefNewGeneration::EvacuateFollowersClosure::
EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                         ScanClosure* cur, ScanClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::EvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}

DefNewGeneration::FastEvacuateFollowersClosure::
FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                             DefNewGeneration* gen,
                             FastScanClosure* cur, FastScanClosure* older) :
  _gch(gch), _level(level), _gen(gen),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
  guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
}

ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
    OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }

FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
    OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }

void KlassScanClosure::do_klass(Klass* klass) {
#ifndef PRODUCT
  if (TraceScavenge) {
    ResourceMark rm;
    gclog_or_tty->print_cr("KlassScanClosure::do_klass " PTR_FORMAT ", %s, dirty: %s",
                           klass,
                           klass->external_name(),
                           klass->has_modified_oops() ? "true" : "false");
  }
#endif

  // If the klass has not been dirtied we know that there are
  // no references into the young gen and we can skip it.
  if (klass->has_modified_oops()) {
    if (_accumulate_modified_oops) {
      klass->accumulate_modified_oops();
    }

    // Clear this state since we're going to scavenge all the metadata.
    klass->clear_modified_oops();

    // Tell the closure which Klass is being scanned so that it can be dirtied
    // if oops are left pointing into the young gen.
    _scavenge_closure->set_scanned_klass(klass);

    klass->oops_do(_scavenge_closure);

    _scavenge_closure->set_scanned_klass(NULL);
  }
}

ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
  _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }

void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }

KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
                                   KlassRemSet* klass_rem_set)
    : _scavenge_closure(scavenge_closure),
      _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}


DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   int level,
                                   const char* policy)
  : Generation(rs, initial_size, level),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
    _eden_space = new ConcEdenSpace(this);
  } else {
    _eden_space = new EdenSpace(this);
  }
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
    vm_exit_during_initialization("Could not allocate a new gen space");

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, alignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _next_gen = NULL;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;

  _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
}

void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  uintx alignment =
    GenCollectedHeap::heap()->collector_policy()->space_alignment();

  // If the spaces are being cleared (only done at heap initialization
  // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces
  // so check.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
         "Initialization of the survivor spaces assumes these are empty");

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, alignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding, if so adjust eden size back up
    minimum_eden_size = align_size_up(minimum_eden_size, alignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_size_down(maximum_survivor_size, alignment);
    survivor_size = MAX2(unaligned_survivor_size, alignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);

  // A minimum eden size implies that there is a part of eden that
  // is being used and that affects the initialization of any
  // newly formed eden.
  bool live_in_eden = minimum_eden_size > 0;

  // If not clearing the spaces, do some checking to verify that
  // the spaces are already mangled.
  if (!clear_space) {
    // Must check mangling before the spaces are reshaped.  Otherwise,
    // the bottom or end of one space may have moved into another;
    // a failure of the check may then not correctly indicate which
    // space is not properly mangled.
    if (ZapUnusedHeapArea) {
      HeapWord* limit = (HeapWord*) _virtual_space.high();
      eden()->check_mangled_unused_area(limit);
      from()->check_mangled_unused_area(limit);
      to()->check_mangled_unused_area(limit);
    }
  }

  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
  // If clear_space and live_in_eden, we will not have cleared any
  // portion of eden above its top. This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, clear_space, mangle_space);
  to()->initialize(toMR, clear_space, mangle_space);

  // Set next compaction spaces.
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space        = to();
  _to_space          = s;
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}

bool DefNewGeneration::expand(size_t bytes) {
  MutexLocker x(ExpandHeap_lock);
  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
  bool success = _virtual_space.expand_by(bytes);
  if (success && ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    HeapWord* new_high = (HeapWord*) _virtual_space.high();
    MemRegion mangle_region(prev_high, new_high);
    SpaceMangler::mangle_region(mangle_region);
  }

  // Do not attempt to expand to the reserve size.  The
  // request should properly observe the maximum size of
  // the generation so an expand-to-reserve should be
  // unnecessary.  Also, a second call to expand-to-reserve
  // potentially can cause an undue expansion.
  // For example, if the first expand fails for unknown reasons,
  // the second might succeed and expand the heap to its maximum
  // value.
  if (GC_locker::is_active()) {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Garbage collection disabled, "
        "expanded heap instead");
    }
  }

  return success;
}


void DefNewGeneration::compute_new_size() {
  // This is called after a gc that includes the following generation
  // (which is required to exist.)  So from-space will normally be empty.
  // Note that we check both spaces, since if scavenge failed they revert roles.
  // If not we bail out (otherwise we would have to relocate the objects)
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  int next_level = level() + 1;
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(next_level < gch->_n_gens,
         "DefNewGeneration cannot be an oldest gen");

  Generation* next_gen = gch->_gens[next_level];
  size_t old_size = next_gen->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = spec()->init_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  // Compute desired new generation size based on NewRatio and
  // NewSizeThreadIncrease
  size_t desired_new_size = old_size/NewRatio;
  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
  desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);

  // Adjust new generation size
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
      changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false.  If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // bail out of shrinking if objects in eden
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    Universe::heap()->barrier_set()->resize_covered_region(cmr);
    if (Verbose && PrintGC) {
      size_t new_size_after  = _virtual_space.committed_size();
      size_t eden_size_after = eden()->capacity();
      size_t survivor_size_after = from()->capacity();
      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
        SIZE_FORMAT "K [eden="
        SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, new_size_after/K,
        eden_size_after/K, survivor_size_after/K);
      if (WizardMode) {
        gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
          thread_increase_size/K, threads_count);
      }
      gclog_or_tty->cr();
    }
  }
}

void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
  assert(false, "NYI -- are you sure you want to call this?");
}


size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}


size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used();      // to() is only used during scavenge
}


size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}

size_t DefNewGeneration::max_capacity() const {
  const size_t alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
  const size_t reserved_bytes = reserved().byte_size();
  return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
}

size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}

size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}

size_t DefNewGeneration::contiguous_available() const {
  return eden()->free();
}


HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }

void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}


void DefNewGeneration::space_iterate(SpaceClosure* blk,
                                     bool usedOnly) {
  blk->do_space(eden());
  blk->do_space(from());
  blk->do_space(to());
}

// The last collection bailed out, we are running out of heap space,
// so we try to allocate the from-space, too.
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
  HeapWord* result = NULL;
  if (Verbose && PrintGCDetails) {
    gclog_or_tty->print("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "):"
                        " will_fail: %s"
                        " heap_lock: %s"
                        " free: " SIZE_FORMAT,
                        size,
                        GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
                          "true" : "false",
                        Heap_lock->is_locked() ? "locked" : "unlocked",
                        from()->free());
  }
  if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) {
    if (Heap_lock->owned_by_self() ||
        (SafepointSynchronize::is_at_safepoint() &&
         Thread::current()->is_VM_thread())) {
      // If the Heap_lock is not locked by this thread, this will be called
      // again later with the Heap_lock held.
      result = from()->allocate(size);
    } else if (PrintGC && Verbose) {
      gclog_or_tty->print_cr(" Heap_lock is not owned by self");
    }
  } else if (PrintGC && Verbose) {
    gclog_or_tty->print_cr(" should_allocate_from_space: NOT");
  }
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr(" returns %s", result == NULL ? "NULL" : "object");
  }
  return result;
}

HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
                                                bool   is_tlab,
                                                bool   parallel) {
  // We don't attempt to expand the young generation (but perhaps we should.)
  return allocate(size, is_tlab);
}

void DefNewGeneration::adjust_desired_tenuring_threshold() {
  // Set the desired survivor size to half the real survivor space
  _tenuring_threshold =
    age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
}

void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start();
  DefNewTracer gc_tracer;
  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());

  _next_gen = gch->next_gen(this);

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: Collection attempt not safe :: ");
    }
    gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  gch->trace_heap_before_gc(&gc_tracer);

  SpecializationStats::clear();

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
                                      gch->rem_set()->klass_rem_set());

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_ScavengeCodeCache;

  gch->gen_process_strong_roots(_level,
                                true,  // Process younger gens, if any,
                                       // as strong roots.
                                true,  // activate StrongRootsScope
                                SharedHeap::ScanningOption(so),
                                &fsc_with_no_gc_barrier,
                                &fsc_with_gc_barrier,
                                &klass_scan_closure);

  // "evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  const ReferenceProcessorStats& stats =
    rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                      NULL, _gc_timer);
  gc_tracer.report_gc_reference_stats(stats);

  if (!_promotion_failed) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a minor collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
    size_policy->reset_gc_overhead_limit_count();
    assert(!gch->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed) ");
    }
    // Add to-space to the list of space to compact
    // when a promotion failure has occurred.  In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_failed();

    // Inform the next generation that a promotion failure occurred.
    _next_gen->promotion_failure_occurred();
    gc_tracer.report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  if (PrintGC && !PrintGCDetails) {
    gch->print_heap_change(gch_prev_used);
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());
  SpecializationStats::print();

  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings; os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  gch->trace_heap_after_gc(&gc_tracer);
  gc_tracer.report_tenuring_threshold(tenuring_threshold());

  _gc_timer->register_gc_end();

  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}

class RemoveForwardPointerClosure: public ObjectClosure {
public:
  void do_object(oop obj) {
    obj->init_mark();
  }
};

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  _promotion_failed_info.reset();
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::remove_forwarding_pointers() {
  RemoveForwardPointerClosure rspc;
  eden()->object_iterate(&rspc);
  from()->object_iterate(&rspc);

  // Now restore saved marks, if any.
  assert(_objs_with_preserved_marks.size() == _preserved_marks_of_objs.size(),
         "should be the same");
  while (!_objs_with_preserved_marks.is_empty()) {
    oop obj   = _objs_with_preserved_marks.pop();
    markOop m = _preserved_marks_of_objs.pop();
    obj->set_mark(m);
  }
  _objs_with_preserved_marks.clear(true);
  _preserved_marks_of_objs.clear(true);
}

void DefNewGeneration::preserve_mark(oop obj, markOop m) {
  assert(_promotion_failed && m->must_be_preserved_for_promotion_failure(obj),
         "Oversaving!");
  _objs_with_preserved_marks.push(obj);
  _preserved_marks_of_objs.push(m);
}

void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    preserve_mark(obj, m);
  }
}

void DefNewGeneration::handle_promotion_failure(oop old) {
  if (PrintPromotionFailure && !_promotion_failed) {
    gclog_or_tty->print(" (promotion failure size = %d) ",
                        old->size());
  }
  _promotion_failed = true;
  _promotion_failed_info.register_copy_failure(old->size());
  preserve_mark_if_necessary(old, old->mark());
  // forward to self
  old->forward_to(old);

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}

oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _next_gen->promote(old, s);
    if (obj == NULL) {
      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  return obj;
}

void DefNewGeneration::drain_promo_failure_scan_stack() {
  while (!_promo_failure_scan_stack.is_empty()) {
    oop obj = _promo_failure_scan_stack.pop();
    obj->oop_iterate(_promo_failure_scan_stack_closure);
  }
}

void DefNewGeneration::save_marks() {
  eden()->set_saved_mark();
  to()->set_saved_mark();
  from()->set_saved_mark();
}


void DefNewGeneration::reset_saved_marks() {
  eden()->reset_saved_mark();
  to()->reset_saved_mark();
  from()->reset_saved_mark();
}


bool DefNewGeneration::no_allocs_since_save_marks() {
  assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
  assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
  return to()->saved_mark_at_top();
}

#define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
                                                                \
void DefNewGeneration::                                         \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
  cl->set_generation(this);                                     \
  eden()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  to()->oop_since_save_marks_iterate##nv_suffix(cl);            \
  from()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  cl->reset_generation();                                       \
  save_marks();                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)

#undef DefNew_SINCE_SAVE_MARKS_DEFN

void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                          size_t max_alloc_words) {
  if (requestor == this || _promotion_failed) return;
  assert(requestor->level() > level(), "DefNewGeneration must be youngest");

  /* $$$ Assert this?  "trace" is a "MarkSweep" function so that's not appropriate.
  if (to_space->top() > to_space->bottom()) {
    trace("to_space not empty when contribute_scratch called");
  }
  */

  ContiguousSpace* to_space = to();
  assert(to_space->end() >= to_space->top(), "pointers out of order");
  size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    ScratchBlock* sb = (ScratchBlock*)to_space->top();
    sb->num_words = free_words;
    sb->next = list;
    list = sb;
  }
}

void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of
  // to_space if ZapUnusedHeapArea.  This is needed because
  // top is not maintained while using to-space as scratch.
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area_complete();
  }
}

bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: to is not empty :: ");
    }
    return false;
  }
  if (_next_gen == NULL) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    _next_gen = gch->next_gen(this);
  }
  return _next_gen->promotion_attempt_is_safe(used());
}

void DefNewGeneration::gc_epilogue(bool full) {
  DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)

  assert(!GC_locker::is_active(), "We should not be executing here");
  // Check if the heap is approaching full after a collection has
  // been done.  Generally the young generation is empty at
  // a minimum at the end of a collection.  If it is not, then
  // the heap is approaching full.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (full) {
    DEBUG_ONLY(seen_incremental_collection_failed = false;)
    if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      }
      gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
      set_should_allocate_from_space(); // we seem to be running out of space
    } else {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      }
      gch->clear_incremental_collection_failed(); // We just did a full collection
      clear_should_allocate_from_space(); // if set
    }
  } else {
#ifdef ASSERT
    // It is possible that incremental_collection_failed() == true
    // here, because an attempted scavenge did not succeed. The policy
    // is normally expected to cause a full collection which should
    // clear that condition, so we should not be here twice in a row
    // with incremental_collection_failed() == true without having done
    // a full collection in between.
    if (!seen_incremental_collection_failed &&
        gch->incremental_collection_failed()) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
                            GCCause::to_string(gch->gc_cause()));
      }
      seen_incremental_collection_failed = true;
    } else if (seen_incremental_collection_failed) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
                            GCCause::to_string(gch->gc_cause()));
      }
      assert(gch->gc_cause() == GCCause::_scavenge_alot ||
             (gch->gc_cause() == GCCause::_java_lang_system_gc && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
             !gch->incremental_collection_failed(),
             "Twice in a row");
      seen_incremental_collection_failed = false;
    }
#endif // ASSERT
  }

  if (ZapUnusedHeapArea) {
    eden()->check_mangled_unused_area_complete();
    from()->check_mangled_unused_area_complete();
    to()->check_mangled_unused_area_complete();
  }

  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }

  // update the generation and space performance counters
  update_counters();
  gch->collector_policy()->counters()->update_counters();
}

void DefNewGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  eden()->set_top_for_allocations();
  to()->set_top_for_allocations();
  from()->set_top_for_allocations();
}

void DefNewGeneration::ref_processor_init() {
  Generation::ref_processor_init();
}


void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}

void DefNewGeneration::verify() {
  eden()->verify();
  from()->verify();
  to()->verify();
}

void DefNewGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print(" eden");
  eden()->print_on(st);
  st->print(" from");
  from()->print_on(st);
  st->print(" to  ");
  to()->print_on(st);
}

const char* DefNewGeneration::name() const {
  return "def new generation";
}

// Moved from inline file as they are not called inline
CompactibleSpace* DefNewGeneration::first_compaction_space() const {
  return eden();
}

HeapWord* DefNewGeneration::allocate(size_t word_size,
                                     bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden.  If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result != NULL) {
    if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
      _next_gen->sample_eden_chunk();
    }
    return result;
  }
  do {
    HeapWord* old_limit = eden()->soft_end();
    if (old_limit < eden()->end()) {
      // Tell the next generation we reached a limit.
      HeapWord* new_limit =
        next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
      if (new_limit != NULL) {
        Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
      } else {
        assert(eden()->soft_end() == eden()->end(),
               "invalid state after allocation_limit_reached returned null");
      }
    } else {
      // The allocation failed and the soft limit is equal to the hard limit,
      // so there is no reason to attempt the allocation again.
      assert(old_limit == eden()->end(), "sanity check");
      break;
    }
    // Try to allocate until it succeeds or the soft limit can't be adjusted.
    result = eden()->par_allocate(word_size);
  } while (result == NULL);

  // If the eden is full and the last collection bailed out, we are running
  // out of heap space, and we try to allocate the from-space, too.
  // allocate_from_space can't be inlined because that would introduce a
  // circular dependency at compile time.
  if (result == NULL) {
    result = allocate_from_space(word_size);
  } else if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
    _next_gen->sample_eden_chunk();
  }
  return result;
}

HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                         bool is_tlab) {
  HeapWord* res = eden()->par_allocate(word_size);
  if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
    _next_gen->sample_eden_chunk();
  }
  return res;
}

void DefNewGeneration::gc_prologue(bool full) {
  // Ensure that _end and _soft_end are the same in eden space.
  eden()->set_soft_end(eden()->end());
}

size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}

size_t DefNewGeneration::tlab_used() const {
  return eden()->used();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}