/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/defNewGeneration.inline.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/genRemSet.hpp"
#include "memory/generationSpec.hpp"
#include "memory/iterator.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/space.inline.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/stack.inline.hpp"

//
// DefNewGeneration functions.

// Methods of protected closure types.
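
// IsAliveClosure treats an object as live if it lies outside the young
// generation's reserved region, or if it has already been forwarded
// (i.e. copied) during the current scavenge; see do_object_b() below.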

DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
  assert(g->level() == 0, "Optimized for youngest gen.");
}
bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
  return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
}

DefNewGeneration::KeepAliveClosure::
KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
  GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
  _rs = (CardTableRS*)rs;
}

void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }


DefNewGeneration::FastKeepAliveClosure::
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {
  _boundary = g->reserved().end();
}

void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }

DefNewGeneration::EvacuateFollowersClosure::
EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                         ScanClosure* cur, ScanClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::EvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}

DefNewGeneration::FastEvacuateFollowersClosure::
FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                             DefNewGeneration* gen,
                             FastScanClosure* cur, FastScanClosure* older) :
  _gch(gch), _level(level), _gen(gen),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
  guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
}

ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
    OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }

FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
    OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }

void KlassScanClosure::do_klass(Klass* klass) {
#ifndef PRODUCT
  if (TraceScavenge) {
    ResourceMark rm;
    gclog_or_tty->print_cr("KlassScanClosure::do_klass %p, %s, dirty: %s",
                           klass,
                           klass->external_name(),
                           klass->has_modified_oops() ? "true" : "false");
  }
#endif

  // If the klass has not been dirtied we know that there are
  // no references into the young gen and we can skip it.
  if (klass->has_modified_oops()) {
    if (_accumulate_modified_oops) {
      klass->accumulate_modified_oops();
    }

    // Clear this state since we're going to scavenge all the metadata.
    klass->clear_modified_oops();

    // Tell the closure which Klass is being scanned so that it can be dirtied
    // if oops are left pointing into the young gen.
    _scavenge_closure->set_scanned_klass(klass);

    klass->oops_do(_scavenge_closure);

    _scavenge_closure->set_scanned_klass(NULL);
  }
}

ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
  _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }

void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }

KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
                                   KlassRemSet* klass_rem_set)
    : _scavenge_closure(scavenge_closure),
      _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}


DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   int level,
                                   const char* policy)
  : Generation(rs, initial_size, level),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
    _eden_space = new ConcEdenSpace(this);
  } else {
    _eden_space = new EdenSpace(this);
  }
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
    vm_exit_during_initialization("Could not allocate a new gen space");

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, alignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _next_gen = NULL;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;

  _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
}

void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  uintx alignment =
    GenCollectedHeap::heap()->collector_policy()->space_alignment();

  // If the spaces are being cleared (only done at heap initialization
  // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces
  // so check.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
         "Initialization of the survivor spaces assumes these are empty");

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, alignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding; if so, adjust eden size back up.
    minimum_eden_size = align_size_up(minimum_eden_size, alignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_size_down(maximum_survivor_size, alignment);
    survivor_size = MAX2(unaligned_survivor_size, alignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start, (HeapWord*)to_end);

  // A minimum eden size implies that there is a part of eden that
  // is being used and that affects the initialization of any
  // newly formed eden.
  bool live_in_eden = minimum_eden_size > 0;

  // If not clearing the spaces, do some checking to verify that
  // the spaces are already mangled.
  if (!clear_space) {
    // Must check mangling before the spaces are reshaped.  Otherwise,
    // the bottom or end of one space may have moved into another, and a
    // failure of the check may not correctly indicate which space
    // is not properly mangled.
    if (ZapUnusedHeapArea) {
      HeapWord* limit = (HeapWord*) _virtual_space.high();
      eden()->check_mangled_unused_area(limit);
      from()->check_mangled_unused_area(limit);
      to()->check_mangled_unused_area(limit);
    }
  }

  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
  // If clear_space and live_in_eden, we will not have cleared any
  // portion of eden above its top. This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, clear_space, mangle_space);
  to()->initialize(toMR, clear_space, mangle_space);

  // Set next compaction spaces.
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space        = to();
  _to_space          = s;
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}

bool DefNewGeneration::expand(size_t bytes) {
  MutexLocker x(ExpandHeap_lock);
  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
  bool success = _virtual_space.expand_by(bytes);
  if (success && ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    HeapWord* new_high = (HeapWord*) _virtual_space.high();
    MemRegion mangle_region(prev_high, new_high);
    SpaceMangler::mangle_region(mangle_region);
  }

  // Do not attempt an expand to the reserve size.  The
  // request should properly observe the maximum size of
  // the generation so an expand-to-reserve should be
  // unnecessary.  Also, a second expand-to-reserve call can
  // potentially cause an undue expansion; for example, if the
  // first expand fails for unknown reasons but the second
  // succeeds and expands the heap to its maximum value.
  if (GC_locker::is_active()) {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Garbage collection disabled, "
        "expanded heap instead");
    }
  }

  return success;
}


void DefNewGeneration::compute_new_size() {
  // This is called after a gc that includes the following generation
  // (which is required to exist.)  So from-space will normally be empty.
  // Note that we check both spaces, since if scavenge failed they revert roles.
  // If not, we bail out (otherwise we would have to relocate the objects).
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  int next_level = level() + 1;
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(next_level < gch->_n_gens,
         "DefNewGeneration cannot be an oldest gen");

  Generation* next_gen = gch->_gens[next_level];
  size_t old_size = next_gen->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = spec()->init_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  // Compute desired new generation size based on NewRatio and
  // NewSizeThreadIncrease
  size_t desired_new_size = old_size/NewRatio;
  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
  desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);

  // Adjust new generation size
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
      changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false.  If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // Bail out of shrinking if there are objects in eden.
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    Universe::heap()->barrier_set()->resize_covered_region(cmr);
    if (Verbose && PrintGC) {
      size_t new_size_after  = _virtual_space.committed_size();
      size_t eden_size_after = eden()->capacity();
      size_t survivor_size_after = from()->capacity();
      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
        SIZE_FORMAT "K [eden="
        SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, new_size_after/K,
        eden_size_after/K, survivor_size_after/K);
      if (WizardMode) {
        gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
          thread_increase_size/K, threads_count);
      }
      gclog_or_tty->cr();
    }
  }
}

void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
  assert(false, "NYI -- are you sure you want to call this?");
}


size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}


size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used();      // to() is only used during scavenge
}


size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}

size_t DefNewGeneration::max_capacity() const {
  const size_t alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
  const size_t reserved_bytes = reserved().byte_size();
  return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
}

size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}

size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}

size_t DefNewGeneration::contiguous_available() const {
  return eden()->free();
}


HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }

void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}


void DefNewGeneration::space_iterate(SpaceClosure* blk,
                                     bool usedOnly) {
  blk->do_space(eden());
  blk->do_space(from());
  blk->do_space(to());
}

// The last collection bailed out, we are running out of heap space,
// so we try to allocate from the from-space, too.
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
  HeapWord* result = NULL;
  if (Verbose && PrintGCDetails) {
    gclog_or_tty->print("DefNewGeneration::allocate_from_space(%u):"
                        " will_fail: %s"
                        " heap_lock: %s"
                        " free: " SIZE_FORMAT,
                        size,
                        GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
                          "true" : "false",
                        Heap_lock->is_locked() ? "locked" : "unlocked",
                        from()->free());
  }
  if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) {
    if (Heap_lock->owned_by_self() ||
        (SafepointSynchronize::is_at_safepoint() &&
         Thread::current()->is_VM_thread())) {
      // If the Heap_lock is not locked by this thread, this will be called
      // again later with the Heap_lock held.
      result = from()->allocate(size);
    } else if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  Heap_lock is not owned by self");
    }
  } else if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("  should_allocate_from_space: NOT");
  }
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("  returns %s", result == NULL ? "NULL" : "object");
  }
  return result;
}

HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
                                                bool   is_tlab,
                                                bool   parallel) {
  // We don't attempt to expand the young generation (but perhaps we should.)
  return allocate(size, is_tlab);
}

void DefNewGeneration::adjust_desired_tenuring_threshold() {
  // Set the desired survivor size to half the real survivor space
  _tenuring_threshold =
    age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
}

void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start();
  DefNewTracer gc_tracer;
  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());

  _next_gen = gch->next_gen(this);

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: Collection attempt not safe :: ");
    }
    gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  gch->trace_heap_before_gc(&gc_tracer);

  SpecializationStats::clear();

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
                                      gch->rem_set()->klass_rem_set());

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_ScavengeCodeCache;

  gch->gen_process_strong_roots(_level,
                                true,  // Process younger gens, if any,
                                       // as strong roots.
                                true,  // activate StrongRootsScope
                                SharedHeap::ScanningOption(so),
                                &fsc_with_no_gc_barrier,
                                true,   // walk *all* scavengable nmethods
                                &fsc_with_gc_barrier,
                                &klass_scan_closure);

  // "evacuate followers".
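  // Transitively copy everything reachable from the objects copied during
  // root scanning: iterate over oops allocated since the save-marks were
  // set, repeating until no further copies are made (see
  // FastEvacuateFollowersClosure::do_void above).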
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  const ReferenceProcessorStats& stats =
    rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                      NULL, _gc_timer);
  gc_tracer.report_gc_reference_stats(stats);

  if (!_promotion_failed) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a minor collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
    size_policy->reset_gc_overhead_limit_count();
    assert(!gch->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed) ");
    }
    // Add to-space to the list of spaces to compact
    // when a promotion failure has occurred.  In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_failed();

    // Inform the next generation that a promotion failure occurred.
    _next_gen->promotion_failure_occurred();
    gc_tracer.report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  if (PrintGC && !PrintGCDetails) {
    gch->print_heap_change(gch_prev_used);
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());
  SpecializationStats::print();

  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings; os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  gch->trace_heap_after_gc(&gc_tracer);
  gc_tracer.report_tenuring_threshold(tenuring_threshold());

  _gc_timer->register_gc_end();

  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}

class RemoveForwardPointerClosure: public ObjectClosure {
public:
  void do_object(oop obj) {
    obj->init_mark();
  }
};

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  _promotion_failed_info.reset();
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::remove_forwarding_pointers() {
  RemoveForwardPointerClosure rspc;
  eden()->object_iterate(&rspc);
  from()->object_iterate(&rspc);

  // Now restore saved marks, if any.
  assert(_objs_with_preserved_marks.size() == _preserved_marks_of_objs.size(),
         "should be the same");
  while (!_objs_with_preserved_marks.is_empty()) {
    oop obj   = _objs_with_preserved_marks.pop();
    markOop m = _preserved_marks_of_objs.pop();
    obj->set_mark(m);
  }
  _objs_with_preserved_marks.clear(true);
  _preserved_marks_of_objs.clear(true);
}

void DefNewGeneration::preserve_mark(oop obj, markOop m) {
  assert(_promotion_failed && m->must_be_preserved_for_promotion_failure(obj),
         "Oversaving!");
  _objs_with_preserved_marks.push(obj);
  _preserved_marks_of_objs.push(m);
}

void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    preserve_mark(obj, m);
  }
}

void DefNewGeneration::handle_promotion_failure(oop old) {
  if (PrintPromotionFailure && !_promotion_failed) {
    gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ",
                        old->size());
  }
  _promotion_failed = true;
  _promotion_failed_info.register_copy_failure(old->size());
  preserve_mark_if_necessary(old, old->mark());
  // forward to self
  old->forward_to(old);

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}

oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _next_gen->promote(old, s);
    if (obj == NULL) {
      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  return obj;
}

void DefNewGeneration::drain_promo_failure_scan_stack() {
  while (!_promo_failure_scan_stack.is_empty()) {
    oop obj = _promo_failure_scan_stack.pop();
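    // The popped object failed promotion and has been forwarded to itself;
    // scan its fields with the saved scan closure so that its referents
    // are still copied to the survivor space or the next generation.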
    obj->oop_iterate(_promo_failure_scan_stack_closure);
  }
}

void DefNewGeneration::save_marks() {
  eden()->set_saved_mark();
  to()->set_saved_mark();
  from()->set_saved_mark();
}


void DefNewGeneration::reset_saved_marks() {
  eden()->reset_saved_mark();
  to()->reset_saved_mark();
  from()->reset_saved_mark();
}


bool DefNewGeneration::no_allocs_since_save_marks() {
  assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
  assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
  return to()->saved_mark_at_top();
}

#define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
                                                                \
void DefNewGeneration::                                         \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
  cl->set_generation(this);                                     \
  eden()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  to()->oop_since_save_marks_iterate##nv_suffix(cl);            \
  from()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  cl->reset_generation();                                       \
  save_marks();                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)

#undef DefNew_SINCE_SAVE_MARKS_DEFN

void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                          size_t max_alloc_words) {
  if (requestor == this || _promotion_failed) return;
  assert(requestor->level() > level(), "DefNewGeneration must be youngest");

  /* $$$ Assert this?  "trace" is a "MarkSweep" function so that's not appropriate.
  if (to_space->top() > to_space->bottom()) {
    trace("to_space not empty when contribute_scratch called");
  }
  */

  ContiguousSpace* to_space = to();
  assert(to_space->end() >= to_space->top(), "pointers out of order");
  size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    ScratchBlock* sb = (ScratchBlock*)to_space->top();
    sb->num_words = free_words;
    sb->next = list;
    list = sb;
  }
}

void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of
  // to_space if ZapUnusedHeapArea.  This is needed because
  // top is not maintained while using to-space as scratch.
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area_complete();
  }
}

bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: to is not empty :: ");
    }
    return false;
  }
  if (_next_gen == NULL) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    _next_gen = gch->next_gen(this);
  }
  return _next_gen->promotion_attempt_is_safe(used());
}

void DefNewGeneration::gc_epilogue(bool full) {
  DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)

  assert(!GC_locker::is_active(), "We should not be executing here");
  // Check if the heap is approaching full after a collection has
  // been done.  Generally the young generation is empty at
  // a minimum at the end of a collection.  If it is not, then
  // the heap is approaching full.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (full) {
    DEBUG_ONLY(seen_incremental_collection_failed = false;)
    if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      }
      gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
      set_should_allocate_from_space(); // we seem to be running out of space
    } else {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      }
      gch->clear_incremental_collection_failed(); // We just did a full collection
      clear_should_allocate_from_space(); // if set
    }
  } else {
#ifdef ASSERT
    // It is possible that incremental_collection_failed() == true
    // here, because an attempted scavenge did not succeed. The policy
    // is normally expected to cause a full collection which should
    // clear that condition, so we should not be here twice in a row
    // with incremental_collection_failed() == true without having done
    // a full collection in between.
    if (!seen_incremental_collection_failed &&
        gch->incremental_collection_failed()) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
                            GCCause::to_string(gch->gc_cause()));
      }
      seen_incremental_collection_failed = true;
    } else if (seen_incremental_collection_failed) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
                            GCCause::to_string(gch->gc_cause()));
      }
      assert(gch->gc_cause() == GCCause::_scavenge_alot ||
             (gch->gc_cause() == GCCause::_java_lang_system_gc && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
             !gch->incremental_collection_failed(),
             "Twice in a row");
      seen_incremental_collection_failed = false;
    }
#endif // ASSERT
  }

  if (ZapUnusedHeapArea) {
    eden()->check_mangled_unused_area_complete();
    from()->check_mangled_unused_area_complete();
    to()->check_mangled_unused_area_complete();
  }

  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }

  // update the generation and space performance counters
  update_counters();
  gch->collector_policy()->counters()->update_counters();
}

void DefNewGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  eden()->set_top_for_allocations();
  to()->set_top_for_allocations();
  from()->set_top_for_allocations();
}

void DefNewGeneration::ref_processor_init() {
  Generation::ref_processor_init();
}


void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}

void DefNewGeneration::verify() {
  eden()->verify();
  from()->verify();
  to()->verify();
}

void DefNewGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print("  eden");
  eden()->print_on(st);
  st->print("  from");
  from()->print_on(st);
  st->print("  to  ");
  to()->print_on(st);
}

const char* DefNewGeneration::name() const {
  return "def new generation";
}

// Moved from inline file as they are not called inline
CompactibleSpace* DefNewGeneration::first_compaction_space() const {
  return eden();
}

HeapWord* DefNewGeneration::allocate(size_t word_size,
                                     bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden.  If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result != NULL) {
    if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
      _next_gen->sample_eden_chunk();
    }
    return result;
  }
  do {
    HeapWord* old_limit = eden()->soft_end();
    if (old_limit < eden()->end()) {
      // Tell the next generation we reached a limit.
      HeapWord* new_limit =
        next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
      if (new_limit != NULL) {
        Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
      } else {
        assert(eden()->soft_end() == eden()->end(),
               "invalid state after allocation_limit_reached returned null");
      }
    } else {
      // The allocation failed and the soft limit is equal to the hard limit,
      // so there is no reason to attempt another allocation.
      assert(old_limit == eden()->end(), "sanity check");
      break;
    }
    // Try to allocate until it succeeds or the soft limit can't be adjusted.
    result = eden()->par_allocate(word_size);
  } while (result == NULL);

  // If the eden is full and the last collection bailed out, we are running
  // out of heap space, and we try to allocate from the from-space, too.
  // allocate_from_space can't be inlined because that would introduce a
  // circular dependency at compile time.
  if (result == NULL) {
    result = allocate_from_space(word_size);
  } else if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
    _next_gen->sample_eden_chunk();
  }
  return result;
}

HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                         bool is_tlab) {
  HeapWord* res = eden()->par_allocate(word_size);
  if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
    _next_gen->sample_eden_chunk();
  }
  return res;
}

void DefNewGeneration::gc_prologue(bool full) {
  // Ensure that _end and _soft_end are the same in eden space.
  eden()->set_soft_end(eden()->end());
}

size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}

size_t DefNewGeneration::tlab_used() const {
  return eden()->used();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}