/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/defNewGeneration.inline.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generationSpec.hpp"
#include "memory/iterator.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/space.inline.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/copy.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif

//
// DefNewGeneration functions.

// Methods of protected closure types.
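// The closure types below drive the copying scavenge:
//  - IsAliveClosure treats an object as live if it lies outside this
//    (youngest) generation or has already been forwarded.
//  - KeepAliveClosure / FastKeepAliveClosure evacuate weak referents found
//    during reference processing, using the CardTableRS captured in the
//    constructor to keep the remembered set consistent.
//  - EvacuateFollowersClosure / FastEvacuateFollowersClosure repeatedly
//    iterate over objects allocated since the last save_marks() until no
//    newly copied objects remain to be scanned.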

DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
  assert(g->level() == 0, "Optimized for youngest gen.");
}
void DefNewGeneration::IsAliveClosure::do_object(oop p) {
  assert(false, "Do not call.");
}
bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
  return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
}

DefNewGeneration::KeepAliveClosure::
KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
  GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
  assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind.");
  _rs = (CardTableRS*)rs;
}

void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }


DefNewGeneration::FastKeepAliveClosure::
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {
  _boundary = g->reserved().end();
}

void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }

DefNewGeneration::EvacuateFollowersClosure::
EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                         ScanClosure* cur, ScanClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::EvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}

DefNewGeneration::FastEvacuateFollowersClosure::
FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                             DefNewGeneration* gen,
                             FastScanClosure* cur, FastScanClosure* older) :
  _gch(gch), _level(level), _gen(gen),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
  guarantee(_gen->promo_failure_scan_stack() == NULL
            || _gen->promo_failure_scan_stack()->length() == 0,
            "Failed to finish scan");
}

ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
  OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }

FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
  OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }

ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
  OopClosure(g->ref_processor()), _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }

void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }

DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   int level,
                                   const char* policy)
  : Generation(rs, initial_size, level),
    _objs_with_preserved_marks(NULL),
    _preserved_marks_of_objs(NULL),
    _promo_failure_scan_stack(NULL),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
    _eden_space = new ConcEdenSpace(this);
  } else {
    _eden_space = new EdenSpace(this);
  }
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
    vm_exit_during_initialization("Could not allocate a new gen space");

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, alignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _next_gen = NULL;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
}

void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  uintx alignment =
    GenCollectedHeap::heap()->collector_policy()->min_alignment();

  // If the spaces are being cleared (only done at heap initialization
  // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces
  // so check.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
         "Initialization of the survivor spaces assumes these are empty");

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, alignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding, if so adjust eden size back up
    minimum_eden_size = align_size_up(minimum_eden_size, alignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_size_down(maximum_survivor_size, alignment);
    survivor_size = MAX2(unaligned_survivor_size, alignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);

  // A minimum eden size implies that there is a part of eden that
  // is being used and that affects the initialization of any
  // newly formed eden.
  bool live_in_eden = minimum_eden_size > 0;

  // If not clearing the spaces, do some checking to verify that
  // the spaces are already mangled.
  if (!clear_space) {
    // Must check mangling before the spaces are reshaped.  Otherwise,
    // the bottom or end of one space may have moved into another, and
    // a failure of the check may not correctly indicate which space
    // is not properly mangled.
    if (ZapUnusedHeapArea) {
      HeapWord* limit = (HeapWord*) _virtual_space.high();
      eden()->check_mangled_unused_area(limit);
      from()->check_mangled_unused_area(limit);
      to()->check_mangled_unused_area(limit);
    }
  }

  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
  // If clear_space and live_in_eden, we will not have cleared any
  // portion of eden above its top. This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, clear_space, mangle_space);
  to()->initialize(toMR, clear_space, mangle_space);

  // Set next compaction spaces.
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space = to();
  _to_space = s;
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}

bool DefNewGeneration::expand(size_t bytes) {
  MutexLocker x(ExpandHeap_lock);
  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
  bool success = _virtual_space.expand_by(bytes);
  if (success && ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    HeapWord* new_high = (HeapWord*) _virtual_space.high();
    MemRegion mangle_region(prev_high, new_high);
    SpaceMangler::mangle_region(mangle_region);
  }

  // Do not attempt an expand-to-the-reserve size.  The
  // request should properly observe the maximum size of
  // the generation so an expand-to-reserve should be
  // unnecessary.  Also, a second call to expand-to-reserve
  // could potentially cause an undue expansion.
  // For example, if the first expand fails for unknown reasons,
  // but the second succeeds and expands the heap to its maximum
  // value.
  if (GC_locker::is_active()) {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Garbage collection disabled, "
        "expanded heap instead");
    }
  }

  return success;
}


void DefNewGeneration::compute_new_size() {
  // This is called after a gc that includes the following generation
  // (which is required to exist.)  So from-space will normally be empty.
  // Note that we check both spaces, since if scavenge failed they revert roles.
  // If not, we bail out (otherwise we would have to relocate the objects).
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  int next_level = level() + 1;
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(next_level < gch->_n_gens,
         "DefNewGeneration cannot be an oldest gen");

  Generation* next_gen = gch->_gens[next_level];
  size_t old_size = next_gen->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = spec()->init_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  // Compute desired new generation size based on NewRatio and
  // NewSizeThreadIncrease
  size_t desired_new_size = old_size/NewRatio;
  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
  desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);

  // Adjust new generation size
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
      changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false.  If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // bail out of shrinking if objects in eden
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    Universe::heap()->barrier_set()->resize_covered_region(cmr);
    if (Verbose && PrintGC) {
      size_t new_size_after = _virtual_space.committed_size();
      size_t eden_size_after = eden()->capacity();
      size_t survivor_size_after = from()->capacity();
      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
                          SIZE_FORMAT "K [eden="
                          SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
                          new_size_before/K, new_size_after/K,
                          eden_size_after/K, survivor_size_after/K);
      if (WizardMode) {
        gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
                            thread_increase_size/K, threads_count);
      }
      gclog_or_tty->cr();
    }
  }
}

void DefNewGeneration::object_iterate_since_last_GC(ObjectClosure* cl) {
  // $$$ This may be wrong in case of "scavenge failure"?
  eden()->object_iterate(cl);
}

void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
  assert(false, "NYI -- are you sure you want to call this?");
}


size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}


size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used();      // to() is only used during scavenge
}


size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}

size_t DefNewGeneration::max_capacity() const {
  const size_t alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
  const size_t reserved_bytes = reserved().byte_size();
  return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
}

size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}

size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}

size_t DefNewGeneration::contiguous_available() const {
  return eden()->free();
}


HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }

void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}


void DefNewGeneration::space_iterate(SpaceClosure* blk,
                                     bool usedOnly) {
  blk->do_space(eden());
  blk->do_space(from());
  blk->do_space(to());
}

// The last collection bailed out, we are running out of heap space,
// so we try to allocate the from-space, too.
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
  HeapWord* result = NULL;
  if (PrintGC && Verbose) {
    gclog_or_tty->print("DefNewGeneration::allocate_from_space(%u):"
                        " will_fail: %s"
                        " heap_lock: %s"
                        " free: " SIZE_FORMAT,
                        size,
                        GenCollectedHeap::heap()->incremental_collection_will_fail() ? "true" : "false",
                        Heap_lock->is_locked() ? "locked" : "unlocked",
                        from()->free());
  }
  if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) {
    if (Heap_lock->owned_by_self() ||
        (SafepointSynchronize::is_at_safepoint() &&
         Thread::current()->is_VM_thread())) {
      // If the Heap_lock is not locked by this thread, this will be called
      // again later with the Heap_lock held.
      result = from()->allocate(size);
    } else if (PrintGC && Verbose) {
      gclog_or_tty->print_cr(" Heap_lock is not owned by self");
    }
  } else if (PrintGC && Verbose) {
    gclog_or_tty->print_cr(" should_allocate_from_space: NOT");
  }
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr(" returns %s", result == NULL ? "NULL" : "object");
  }
  return result;
}

HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
                                                bool   is_tlab,
                                                bool   parallel) {
  // We don't attempt to expand the young generation (but perhaps we should.)
  return allocate(size, is_tlab);
}


void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  _next_gen = gch->next_gen(this);
  assert(_next_gen != NULL,
         "This must be the youngest gen, and not the only gen");

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_will_fail();
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  gch->gen_process_strong_roots(_level,
                                true,  // Process younger gens, if any,
                                       // as strong roots.
                                true,  // activate StrongRootsScope
                                false, // not collecting perm generation.
                                SharedHeap::SO_AllClasses,
                                &fsc_with_no_gc_barrier,
                                true,  // walk *all* scavengable nmethods
                                &fsc_with_gc_barrier);

  // "evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                    NULL);
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a minor collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    // Set the desired survivor size to half the real survivor space
    _tenuring_threshold =
      age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
    size_policy->reset_gc_overhead_limit_count();
    if (PrintGC && !PrintGCDetails) {
      gch->print_heap_change(gch_prev_used);
    }
  } else {
    assert(HandlePromotionFailure,
           "Should not be here unless promotion failure handling is on");
    assert(_promo_failure_scan_stack != NULL &&
           _promo_failure_scan_stack->length() == 0, "post condition");

    // deallocate stack and its elements
    delete _promo_failure_scan_stack;
    _promo_failure_scan_stack = NULL;

    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed) ");
    }
    // Add to-space to the list of spaces to compact
    // when a promotion failure has occurred.  In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For the sake of uniformity wrt ParNewGeneration::collect().
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_will_fail();

    // Inform the next generation that a promotion failure occurred.
    _next_gen->promotion_failure_occurred();

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());
  SpecializationStats::print();
  update_time_of_last_gc(os::javaTimeMillis());
}

class RemoveForwardPointerClosure: public ObjectClosure {
public:
  void do_object(oop obj) {
    obj->init_mark();
  }
};

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::remove_forwarding_pointers() {
  RemoveForwardPointerClosure rspc;
  eden()->object_iterate(&rspc);
  from()->object_iterate(&rspc);
  // Now restore saved marks, if any.
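  // During a promotion failure, objects that could not be copied were
  // forwarded to themselves, and any mark words carrying real state
  // (locks, hash codes, etc.) were saved on the side by
  // preserve_mark_if_necessary().  The iteration above reinitialized
  // every mark word; reinstall the saved ones here.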
  if (_objs_with_preserved_marks != NULL) {
    assert(_preserved_marks_of_objs != NULL, "Both or none.");
    assert(_objs_with_preserved_marks->length() ==
           _preserved_marks_of_objs->length(), "Both or none.");
    for (int i = 0; i < _objs_with_preserved_marks->length(); i++) {
      oop obj   = _objs_with_preserved_marks->at(i);
      markOop m = _preserved_marks_of_objs->at(i);
      obj->set_mark(m);
    }
    delete _objs_with_preserved_marks;
    delete _preserved_marks_of_objs;
    _objs_with_preserved_marks = NULL;
    _preserved_marks_of_objs = NULL;
  }
}

void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    if (_objs_with_preserved_marks == NULL) {
      assert(_preserved_marks_of_objs == NULL, "Both or none.");
      _objs_with_preserved_marks = new (ResourceObj::C_HEAP)
        GrowableArray<oop>(PreserveMarkStackSize, true);
      _preserved_marks_of_objs = new (ResourceObj::C_HEAP)
        GrowableArray<markOop>(PreserveMarkStackSize, true);
    }
    _objs_with_preserved_marks->push(obj);
    _preserved_marks_of_objs->push(m);
  }
}

void DefNewGeneration::handle_promotion_failure(oop old) {
  preserve_mark_if_necessary(old, old->mark());
  if (!_promotion_failed && PrintPromotionFailure) {
    gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ",
                        old->size());
  }

  // forward to self
  old->forward_to(old);
  _promotion_failed = true;

  push_on_promo_failure_scan_stack(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}

oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _next_gen->promote(old, s);
    if (obj == NULL) {
      if (!HandlePromotionFailure) {
        // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
        // is incorrectly set. In any case, it's seriously wrong to be here!
        vm_exit_out_of_memory(s*wordSize, "promotion");
      }

      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  return obj;
}

void DefNewGeneration::push_on_promo_failure_scan_stack(oop obj) {
  if (_promo_failure_scan_stack == NULL) {
    _promo_failure_scan_stack = new (ResourceObj::C_HEAP)
      GrowableArray<oop>(40, true);
  }

  _promo_failure_scan_stack->push(obj);
}

void DefNewGeneration::drain_promo_failure_scan_stack() {
  assert(_promo_failure_scan_stack != NULL, "precondition");

  while (_promo_failure_scan_stack->length() > 0) {
    oop obj = _promo_failure_scan_stack->pop();
    obj->oop_iterate(_promo_failure_scan_stack_closure);
  }
}

void DefNewGeneration::save_marks() {
  eden()->set_saved_mark();
  to()->set_saved_mark();
  from()->set_saved_mark();
}


void DefNewGeneration::reset_saved_marks() {
  eden()->reset_saved_mark();
  to()->reset_saved_mark();
  from()->reset_saved_mark();
}


bool DefNewGeneration::no_allocs_since_save_marks() {
  assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
  assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
  return to()->saved_mark_at_top();
}

#define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
                                                                \
void DefNewGeneration::                                         \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
  cl->set_generation(this);                                     \
  eden()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  to()->oop_since_save_marks_iterate##nv_suffix(cl);            \
  from()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  cl->reset_generation();                                       \
  save_marks();                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)

#undef DefNew_SINCE_SAVE_MARKS_DEFN

void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                          size_t max_alloc_words) {
  if (requestor == this || _promotion_failed) return;
  assert(requestor->level() > level(), "DefNewGeneration must be youngest");

  /* $$$ Assert this?  "trace" is a "MarkSweep" function so that's not appropriate.
  if (to_space->top() > to_space->bottom()) {
    trace("to_space not empty when contribute_scratch called");
  }
  */

  ContiguousSpace* to_space = to();
  assert(to_space->end() >= to_space->top(), "pointers out of order");
  size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    ScratchBlock* sb = (ScratchBlock*)to_space->top();
    sb->num_words = free_words;
    sb->next = list;
    list = sb;
  }
}

void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of
  // to_space if ZapUnusedHeapArea.  This is needed because
  // top is not maintained while using to-space as scratch.
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area_complete();
  }
}

bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    return false;
  }
  if (_next_gen == NULL) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    _next_gen = gch->next_gen(this);
    assert(_next_gen != NULL,
           "This must be the youngest gen, and not the only gen");
  }

  // Decide if there's enough room for a full promotion
  // When using extremely large edens, we effectively lose a
  // large amount of old space.  Use the "MaxLiveObjectEvacuationRatio"
  // flag to reduce the minimum evacuation space requirements. If
  // there is not enough space to evacuate eden during a scavenge,
  // the VM will immediately exit with an out of memory error.
  // This flag has not been tested
  // with collectors other than simple mark & sweep.
  //
  // Note that with the addition of promotion failure handling, the
  // VM will not immediately exit but will undo the young generation
  // collection.  The parameter is left here for compatibility.
  const double evacuation_ratio = MaxLiveObjectEvacuationRatio / 100.0;

  // worst_case_evacuation is based on "used()".  For the case where this
  // method is called after a collection, this is still appropriate because
  // the case that needs to be detected is one in which a full collection
  // has been done and has overflowed into the young generation.  In that
  // case a minor collection will fail (the overflow of the full collection
  // means there is no space in the old generation for any promotion).
  size_t worst_case_evacuation = (size_t)(used() * evacuation_ratio);

  return _next_gen->promotion_attempt_is_safe(worst_case_evacuation,
                                              HandlePromotionFailure);
}

void DefNewGeneration::gc_epilogue(bool full) {
  // Check if the heap is approaching full after a collection has
  // been done.  Generally the young generation is empty at
  // a minimum at the end of a collection.  If it is not, then
  // the heap is approaching full.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  clear_should_allocate_from_space();
  if (collection_attempt_is_safe()) {
    gch->clear_incremental_collection_will_fail();
  } else {
    gch->set_incremental_collection_will_fail();
    if (full) { // we seem to be running out of space
      set_should_allocate_from_space();
    }
  }

  if (ZapUnusedHeapArea) {
    eden()->check_mangled_unused_area_complete();
    from()->check_mangled_unused_area_complete();
    to()->check_mangled_unused_area_complete();
  }

  // update the generation and space performance counters
  update_counters();
  gch->collector_policy()->counters()->update_counters();
}

void DefNewGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  eden()->set_top_for_allocations();
  to()->set_top_for_allocations();
  from()->set_top_for_allocations();
}


void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}

void DefNewGeneration::verify(bool allow_dirty) {
  eden()->verify(allow_dirty);
  from()->verify(allow_dirty);
  to()->verify(allow_dirty);
}

void DefNewGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print(" eden");
  eden()->print_on(st);
  st->print(" from");
  from()->print_on(st);
  st->print(" to ");
  to()->print_on(st);
}


const char* DefNewGeneration::name() const {
  return "def new generation";
}

// Moved from inline file as they are not called inline
CompactibleSpace* DefNewGeneration::first_compaction_space() const {
  return eden();
}

HeapWord* DefNewGeneration::allocate(size_t word_size,
                                     bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden.  If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result != NULL) {
    return result;
  }
  do {
    HeapWord* old_limit = eden()->soft_end();
    if (old_limit < eden()->end()) {
      // Tell the next generation we reached a limit.
      HeapWord* new_limit =
        next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
      if (new_limit != NULL) {
        Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
      } else {
        assert(eden()->soft_end() == eden()->end(),
               "invalid state after allocation_limit_reached returned null");
      }
    } else {
      // The allocation failed and the soft limit is equal to the hard limit,
      // so there is no reason to attempt another allocation.
      assert(old_limit == eden()->end(), "sanity check");
      break;
    }
    // Try to allocate until it succeeds or the soft limit can no longer be adjusted.
    result = eden()->par_allocate(word_size);
  } while (result == NULL);

  // If the eden is full and the last collection bailed out, we are running
  // out of heap space, and we try to allocate the from-space, too.
  // allocate_from_space can't be inlined because that would introduce a
  // circular dependency at compile time.
  if (result == NULL) {
    result = allocate_from_space(word_size);
  }
  return result;
}

HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                         bool is_tlab) {
  return eden()->par_allocate(word_size);
}

void DefNewGeneration::gc_prologue(bool full) {
  // Ensure that _end and _soft_end are the same in eden space.
  eden()->set_soft_end(eden()->end());
}

size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}