/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/defNewGeneration.inline.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generationSpec.hpp"
#include "memory/iterator.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/space.inline.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/copy.hpp"
#include "utilities/stack.inline.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "thread_bsd.inline.hpp"
#endif

//
// DefNewGeneration functions.

// Methods of protected closure types.
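// These closure types support the copying scavenge below: IsAliveClosure
// treats an object as live if it lies outside this generation or has already
// been forwarded; the KeepAlive closures keep weak referents alive during
// reference processing (caching the card-table remembered set); and the
// EvacuateFollowers closures rescan newly copied objects until no further
// allocations have happened since the save marks were set.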

DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
  assert(g->level() == 0, "Optimized for youngest gen.");
}
void DefNewGeneration::IsAliveClosure::do_object(oop p) {
  assert(false, "Do not call.");
}
bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
  return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
}

DefNewGeneration::KeepAliveClosure::
KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
  GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
  assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind.");
  _rs = (CardTableRS*)rs;
}

void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }


DefNewGeneration::FastKeepAliveClosure::
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {
  _boundary = g->reserved().end();
}

void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }

DefNewGeneration::EvacuateFollowersClosure::
EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                         ScanClosure* cur, ScanClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::EvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}

DefNewGeneration::FastEvacuateFollowersClosure::
FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                             DefNewGeneration* gen,
                             FastScanClosure* cur, FastScanClosure* older) :
  _gch(gch), _level(level), _gen(gen),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
  guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
}

ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
  OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }

FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
  OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }

ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
  OopClosure(g->ref_processor()), _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }

void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }

DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   int level,
                                   const char* policy)
  : Generation(rs, initial_size, level),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
    _eden_space = new ConcEdenSpace(this);
  } else {
    _eden_space = new EdenSpace(this);
  }
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
    vm_exit_during_initialization("Could not allocate a new gen space");

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, alignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _next_gen = NULL;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;

  _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
}

void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  uintx alignment =
    GenCollectedHeap::heap()->collector_policy()->min_alignment();

  // If the spaces are being cleared (only done at heap initialization
  // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces
  // so check.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
         "Initialization of the survivor spaces assumes these are empty");

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, alignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding, if so adjust eden size back up
    minimum_eden_size = align_size_up(minimum_eden_size, alignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_size_down(maximum_survivor_size, alignment);
    survivor_size = MAX2(unaligned_survivor_size, alignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);

  // A minimum eden size implies that there is a part of eden that
  // is being used and that affects the initialization of any
  // newly formed eden.
  bool live_in_eden = minimum_eden_size > 0;

  // If not clearing the spaces, do some checking to verify that
  // the spaces are already mangled.
  if (!clear_space) {
    // Must check mangling before the spaces are reshaped.  Otherwise,
    // the bottom or end of one space may have moved into another, and
    // a failure of the check may not correctly indicate which space
    // is not properly mangled.
    if (ZapUnusedHeapArea) {
      HeapWord* limit = (HeapWord*) _virtual_space.high();
      eden()->check_mangled_unused_area(limit);
      from()->check_mangled_unused_area(limit);
      to()->check_mangled_unused_area(limit);
    }
  }

  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
  // If clear_space and live_in_eden, we will not have cleared any
  // portion of eden above its top. This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, clear_space, mangle_space);
  to()->initialize(toMR, clear_space, mangle_space);

  // Set next compaction spaces.
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space = to();
  _to_space = s;
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}

bool DefNewGeneration::expand(size_t bytes) {
  MutexLocker x(ExpandHeap_lock);
  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
  bool success = _virtual_space.expand_by(bytes);
  if (success && ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    HeapWord* new_high = (HeapWord*) _virtual_space.high();
    MemRegion mangle_region(prev_high, new_high);
    SpaceMangler::mangle_region(mangle_region);
  }

  // Do not attempt to expand to the reserve size.  The
  // request should properly observe the maximum size of
  // the generation so an expand-to-reserve should be
  // unnecessary.  Also, a second expand-to-reserve call could
  // cause an undue expansion; for example, the first expand
  // could fail for unknown reasons while the second succeeds
  // and expands the heap to its maximum value.
  if (GC_locker::is_active()) {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Garbage collection disabled, "
                             "expanded heap instead");
    }
  }

  return success;
}


void DefNewGeneration::compute_new_size() {
  // This is called after a gc that includes the following generation
  // (which is required to exist.)  So from-space will normally be empty.
  // Note that we check both spaces, since if scavenge failed they reverse roles.
  // If they are not empty, we bail out (otherwise we would have to relocate
  // the objects).
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  int next_level = level() + 1;
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(next_level < gch->_n_gens,
         "DefNewGeneration cannot be an oldest gen");

  Generation* next_gen = gch->_gens[next_level];
  size_t old_size = next_gen->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = spec()->init_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  // Compute desired new generation size based on NewRatio and
  // NewSizeThreadIncrease
  size_t desired_new_size = old_size/NewRatio;
  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
  desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);

  // Adjust new generation size
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
      changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false.  If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // bail out of shrinking if objects in eden
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    Universe::heap()->barrier_set()->resize_covered_region(cmr);
    if (Verbose && PrintGC) {
      size_t new_size_after  = _virtual_space.committed_size();
      size_t eden_size_after = eden()->capacity();
      size_t survivor_size_after = from()->capacity();
      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
                          SIZE_FORMAT "K [eden="
                          SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
                          new_size_before/K, new_size_after/K,
                          eden_size_after/K, survivor_size_after/K);
      if (WizardMode) {
        gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
                            thread_increase_size/K, threads_count);
      }
      gclog_or_tty->cr();
    }
  }
}

void DefNewGeneration::object_iterate_since_last_GC(ObjectClosure* cl) {
  // $$$ This may be wrong in case of "scavenge failure"?
  eden()->object_iterate(cl);
}

void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
  assert(false, "NYI -- are you sure you want to call this?");
}


size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}


size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used();      // to() is only used during scavenge
}


size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}

size_t DefNewGeneration::max_capacity() const {
  const size_t alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
  const size_t reserved_bytes = reserved().byte_size();
  return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
}

size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}

size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}

size_t DefNewGeneration::contiguous_available() const {
  return eden()->free();
}


HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }

void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}


void DefNewGeneration::space_iterate(SpaceClosure* blk,
                                     bool usedOnly) {
  blk->do_space(eden());
  blk->do_space(from());
  blk->do_space(to());
}

// The last collection bailed out, we are running out of heap space,
// so we try to allocate from the from-space, too.
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
  HeapWord* result = NULL;
  if (Verbose && PrintGCDetails) {
    gclog_or_tty->print("DefNewGeneration::allocate_from_space(%u):"
                        " will_fail: %s"
                        " heap_lock: %s"
                        " free: " SIZE_FORMAT,
                        size,
                        GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
                          "true" : "false",
                        Heap_lock->is_locked() ? "locked" : "unlocked",
                        from()->free());
  }
  if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) {
    if (Heap_lock->owned_by_self() ||
        (SafepointSynchronize::is_at_safepoint() &&
         Thread::current()->is_VM_thread())) {
      // If the Heap_lock is not locked by this thread, this will be called
      // again later with the Heap_lock held.
      result = from()->allocate(size);
    } else if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  Heap_lock is not owned by self");
    }
  } else if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("  should_allocate_from_space: NOT");
  }
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("  returns %s", result == NULL ? "NULL" : "object");
  }
  return result;
}

HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
                                                bool   is_tlab,
                                                bool   parallel) {
  // We don't attempt to expand the young generation (but perhaps we should.)
  return allocate(size, is_tlab);
}


void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start(os::elapsed_counter());
  DefNewTracer gc_tracer;
  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());

  _next_gen = gch->next_gen(this);
  assert(_next_gen != NULL,
         "This must be the youngest gen, and not the only gen");

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: Collection attempt not safe :: ");
    }
    gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  gch->trace_heap_before_gc(&gc_tracer);

  SpecializationStats::clear();

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  gch->gen_process_strong_roots(_level,
                                true,  // Process younger gens, if any,
                                       // as strong roots.
                                true,  // activate StrongRootsScope
                                false, // not collecting perm generation.
                                SharedHeap::SO_AllClasses,
                                &fsc_with_no_gc_barrier,
                                true,  // walk *all* scavengable nmethods
                                &fsc_with_gc_barrier);

  // "evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  const ReferenceProcessorStats& stats =
    rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                      NULL, _gc_timer);
  gc_tracer.report_gc_reference_stats(stats);
  if (!_promotion_failed) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a minor collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    // Set the desired survivor size to half the real survivor space
    _tenuring_threshold =
      age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
    size_policy->reset_gc_overhead_limit_count();
    if (PrintGC && !PrintGCDetails) {
      gch->print_heap_change(gch_prev_used);
    }
    assert(!gch->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed) ");
    }
    // Add to-space to the list of spaces to compact
    // when a promotion failure has occurred.  In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_failed();

    // Inform the next generation that a promotion failure occurred.
    _next_gen->promotion_failure_occurred();
    gc_tracer.report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());
  SpecializationStats::print();

  // We need to use a monotonically non-decreasing time in ms
  // because os::javaTimeMillis() does not guarantee monotonicity
  // and we would otherwise see time-warp warnings.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  gch->trace_heap_after_gc(&gc_tracer);

  _gc_timer->register_gc_end(os::elapsed_counter());

  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}

class RemoveForwardPointerClosure: public ObjectClosure {
public:
  void do_object(oop obj) {
    obj->init_mark();
  }
};

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  _promotion_failed_info.reset();
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::remove_forwarding_pointers() {
  RemoveForwardPointerClosure rspc;
  eden()->object_iterate(&rspc);
  from()->object_iterate(&rspc);

  // Now restore saved marks, if any.
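  // Only marks that must_be_preserved_for_promotion_failure() were saved by
  // preserve_mark_if_necessary(); every other header was already reset to the
  // default mark word by the closure above.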
  assert(_objs_with_preserved_marks.size() == _preserved_marks_of_objs.size(),
         "should be the same");
  while (!_objs_with_preserved_marks.is_empty()) {
    oop obj   = _objs_with_preserved_marks.pop();
    markOop m = _preserved_marks_of_objs.pop();
    obj->set_mark(m);
  }
  _objs_with_preserved_marks.clear(true);
  _preserved_marks_of_objs.clear(true);
}

void DefNewGeneration::preserve_mark(oop obj, markOop m) {
  assert(_promotion_failed && m->must_be_preserved_for_promotion_failure(obj),
         "Oversaving!");
  _objs_with_preserved_marks.push(obj);
  _preserved_marks_of_objs.push(m);
}

void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    preserve_mark(obj, m);
  }
}

void DefNewGeneration::handle_promotion_failure(oop old) {
  if (PrintPromotionFailure && !_promotion_failed) {
    gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ",
                        old->size());
  }
  _promotion_failed = true;
  _promotion_failed_info.register_copy_failure(old->size());
  preserve_mark_if_necessary(old, old->mark());
  // forward to self
  old->forward_to(old);

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}

oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _next_gen->promote(old, s);
    if (obj == NULL) {
      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  return obj;
}

void DefNewGeneration::drain_promo_failure_scan_stack() {
  while (!_promo_failure_scan_stack.is_empty()) {
    oop obj = _promo_failure_scan_stack.pop();
    obj->oop_iterate(_promo_failure_scan_stack_closure);
  }
}

void DefNewGeneration::save_marks() {
  eden()->set_saved_mark();
  to()->set_saved_mark();
  from()->set_saved_mark();
}


void DefNewGeneration::reset_saved_marks() {
  eden()->reset_saved_mark();
  to()->reset_saved_mark();
  from()->reset_saved_mark();
}


bool DefNewGeneration::no_allocs_since_save_marks() {
  assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
  assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
  return to()->saved_mark_at_top();
}

#define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
                                                                \
void DefNewGeneration::                                         \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
  cl->set_generation(this);                                     \
  eden()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  to()->oop_since_save_marks_iterate##nv_suffix(cl);            \
  from()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  cl->reset_generation();                                       \
  save_marks();                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)

#undef DefNew_SINCE_SAVE_MARKS_DEFN

void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                          size_t max_alloc_words) {
  if (requestor == this || _promotion_failed) return;
  assert(requestor->level() > level(), "DefNewGeneration must be youngest");

  /* $$$ Assert this?  "trace" is a "MarkSweep" function so that's not appropriate.
  if (to_space->top() > to_space->bottom()) {
    trace("to_space not empty when contribute_scratch called");
  }
  */

  ContiguousSpace* to_space = to();
  assert(to_space->end() >= to_space->top(), "pointers out of order");
  size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    ScratchBlock* sb = (ScratchBlock*)to_space->top();
    sb->num_words = free_words;
    sb->next = list;
    list = sb;
  }
}

void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of
  // to_space if ZapUnusedHeapArea.  This is needed because
  // top is not maintained while using to-space as scratch.
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area_complete();
  }
}

bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: to is not empty :: ");
    }
    return false;
  }
  if (_next_gen == NULL) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    _next_gen = gch->next_gen(this);
    assert(_next_gen != NULL,
           "This must be the youngest gen, and not the only gen");
  }
  return _next_gen->promotion_attempt_is_safe(used());
}

void DefNewGeneration::gc_epilogue(bool full) {
  DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)

  assert(!GC_locker::is_active(), "We should not be executing here");
  // Check if the heap is approaching full after a collection has
  // been done.  Generally, at a minimum, the young generation is
  // empty at the end of a collection.  If it is not, then
  // the heap is approaching full.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (full) {
    DEBUG_ONLY(seen_incremental_collection_failed = false;)
    if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      }
      gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
      set_should_allocate_from_space(); // we seem to be running out of space
    } else {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      }
      gch->clear_incremental_collection_failed(); // We just did a full collection
      clear_should_allocate_from_space(); // if set
    }
  } else {
#ifdef ASSERT
    // It is possible that incremental_collection_failed() == true
    // here, because an attempted scavenge did not succeed.  The policy
    // is normally expected to cause a full collection which should
    // clear that condition, so we should not be here twice in a row
    // with incremental_collection_failed() == true without having done
    // a full collection in between.
    if (!seen_incremental_collection_failed &&
        gch->incremental_collection_failed()) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
                            GCCause::to_string(gch->gc_cause()));
      }
      seen_incremental_collection_failed = true;
    } else if (seen_incremental_collection_failed) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
                            GCCause::to_string(gch->gc_cause()));
      }
      assert(gch->gc_cause() == GCCause::_scavenge_alot ||
             (gch->gc_cause() == GCCause::_java_lang_system_gc && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
             !gch->incremental_collection_failed(),
             "Twice in a row");
      seen_incremental_collection_failed = false;
    }
#endif // ASSERT
  }

  if (ZapUnusedHeapArea) {
    eden()->check_mangled_unused_area_complete();
    from()->check_mangled_unused_area_complete();
    to()->check_mangled_unused_area_complete();
  }

  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }

  // update the generation and space performance counters
  update_counters();
  gch->collector_policy()->counters()->update_counters();
}

void DefNewGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  eden()->set_top_for_allocations();
  to()->set_top_for_allocations();
  from()->set_top_for_allocations();
}

void DefNewGeneration::ref_processor_init() {
  Generation::ref_processor_init();
}


void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}

void DefNewGeneration::verify() {
  eden()->verify();
  from()->verify();
  to()->verify();
}

void DefNewGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print(" eden");
  eden()->print_on(st);
  st->print(" from");
  from()->print_on(st);
  st->print(" to ");
  to()->print_on(st);
}


const char* DefNewGeneration::name() const {
  return "def new generation";
}

// Moved from inline file as they are not called inline
CompactibleSpace* DefNewGeneration::first_compaction_space() const {
  return eden();
}

HeapWord* DefNewGeneration::allocate(size_t word_size,
                                     bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden.  If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result != NULL) {
    return result;
  }
  do {
    HeapWord* old_limit = eden()->soft_end();
    if (old_limit < eden()->end()) {
      // Tell the next generation we reached a limit.
      HeapWord* new_limit =
        next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
      if (new_limit != NULL) {
        Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
      } else {
        assert(eden()->soft_end() == eden()->end(),
               "invalid state after allocation_limit_reached returned null");
      }
    } else {
      // The allocation failed and the soft limit is equal to the hard limit,
      // so there is no point in retrying the allocation.
      assert(old_limit == eden()->end(), "sanity check");
      break;
    }
    // Try to allocate until it succeeds or the soft limit can no longer be adjusted.
    result = eden()->par_allocate(word_size);
  } while (result == NULL);

  // If the eden is full and the last collection bailed out, we are running
  // out of heap space, and we try to allocate from the from-space, too.
  // allocate_from_space can't be inlined because that would introduce a
  // circular dependency at compile time.
  if (result == NULL) {
    result = allocate_from_space(word_size);
  }
  return result;
}

HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                         bool is_tlab) {
  return eden()->par_allocate(word_size);
}

void DefNewGeneration::gc_prologue(bool full) {
  // Ensure that _end and _soft_end are the same in eden space.
  eden()->set_soft_end(eden()->end());
}

size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}