/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/defNewGeneration.inline.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.inline.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/genRemSet.hpp"
#include "memory/generationSpec.hpp"
#include "memory/iterator.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/space.inline.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

//
// DefNewGeneration functions.

// Methods of protected closure types.
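
// IsAliveClosure answers liveness queries during a scavenge: anything
// outside the young generation's reserved region is treated as live; an
// object inside it is live only if it has already been forwarded (copied)
// by the current scavenge.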
DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
  assert(g->level() == 0, "Optimized for youngest gen.");
}
bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
  return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
}

DefNewGeneration::KeepAliveClosure::
KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
  GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
  _rs = (CardTableRS*)rs;
}

void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }


DefNewGeneration::FastKeepAliveClosure::
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {
  _boundary = g->reserved().end();
}

void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }

DefNewGeneration::EvacuateFollowersClosure::
EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                         ScanClosure* cur, ScanClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::EvacuateFollowersClosure::do_void() {
  do {
    _gch->gch_oop_since_save_marks_iterate<true>(_level, _scan_cur_or_nonheap,
                                                 _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}

DefNewGeneration::FastEvacuateFollowersClosure::
FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                             DefNewGeneration* gen,
                             FastScanClosure* cur, FastScanClosure* older) :
  _gch(gch), _level(level), _gen(gen),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
  do {
    _gch->gch_oop_since_save_marks_iterate<true>(_level, _scan_cur_or_nonheap,
                                                 _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
  guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
}

ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
    OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }

FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
    OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }

void KlassScanClosure::do_klass(Klass* klass) {
#ifndef PRODUCT
  if (TraceScavenge) {
    ResourceMark rm;
    gclog_or_tty->print_cr("KlassScanClosure::do_klass " PTR_FORMAT ", %s, dirty: %s",
                           klass,
                           klass->external_name(),
                           klass->has_modified_oops() ? "true" : "false");
  }
#endif

  // If the klass has not been dirtied we know that there are
  // no references into the young gen and we can skip it.
  if (klass->has_modified_oops()) {
    if (_accumulate_modified_oops) {
      klass->accumulate_modified_oops();
    }

    // Clear this state since we're going to scavenge all the metadata.
    klass->clear_modified_oops();

    // Tell the closure which Klass is being scanned so that it can be dirtied
    // if oops are left pointing into the young gen.
    _scavenge_closure->set_scanned_klass(klass);

    klass->oops_do(_scavenge_closure);

    _scavenge_closure->set_scanned_klass(NULL);
  }
}

ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
  _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }

void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }

KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
                                   KlassRemSet* klass_rem_set)
    : _scavenge_closure(scavenge_closure),
      _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}


DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   int level,
                                   const char* policy)
  : Generation(rs, initial_size, level, _dispatch_index_generation_def_new),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
    _eden_space = new ConcEdenSpace(this);
  } else {
    _eden_space = new EdenSpace(this);
  }
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
    vm_exit_during_initialization("Could not allocate a new gen space");

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
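  // compute_survivor_size() derives the survivor size from the given
  // generation size (a SurvivorRatio-based split); eden gets what remains
  // after carving out two survivor spaces.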
  uintx alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, alignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _next_gen = NULL;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;

  _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
}

void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  uintx alignment =
    GenCollectedHeap::heap()->collector_policy()->space_alignment();

  // If the spaces are being cleared (only done at heap initialization
  // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces,
  // so check that they are empty.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
         "Initialization of the survivor spaces assumes these are empty");

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, alignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding; if so, adjust eden size back up.
    minimum_eden_size = align_size_up(minimum_eden_size, alignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_size_down(maximum_survivor_size, alignment);
    survivor_size = MAX2(unaligned_survivor_size, alignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);

  // A minimum eden size implies that there is a part of eden that
  // is being used and that affects the initialization of any
  // newly formed eden.
  bool live_in_eden = minimum_eden_size > 0;

  // If not clearing the spaces, do some checking to verify that
  // the spaces are already mangled.
  if (!clear_space) {
    // Must check mangling before the spaces are reshaped. Otherwise,
    // the bottom or end of one space may have moved into another, and
    // a failure of the check may not correctly indicate which space
    // is not properly mangled.
    if (ZapUnusedHeapArea) {
      HeapWord* limit = (HeapWord*) _virtual_space.high();
      eden()->check_mangled_unused_area(limit);
      from()->check_mangled_unused_area(limit);
      to()->check_mangled_unused_area(limit);
    }
  }

  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
  // If clear_space and live_in_eden, we will not have cleared any
  // portion of eden above its top. This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, clear_space, mangle_space);
  to()->initialize(toMR, clear_space, mangle_space);

  // Set next compaction spaces.
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction, so it need
  // not be considered. The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space        = to();
  _to_space          = s;
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction, so it need
  // not be considered. The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}

bool DefNewGeneration::expand(size_t bytes) {
  MutexLocker x(ExpandHeap_lock);
  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
  bool success = _virtual_space.expand_by(bytes);
  if (success && ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    HeapWord* new_high = (HeapWord*) _virtual_space.high();
    MemRegion mangle_region(prev_high, new_high);
    SpaceMangler::mangle_region(mangle_region);
  }

  // Do not attempt to expand to the reserve size. The
  // request should properly observe the maximum size of
  // the generation, so an expand-to-reserve should be
  // unnecessary. Also, a second expand-to-reserve call
  // can potentially cause an undue expansion: for example,
  // the first expand may fail for unknown reasons,
  // but the second may succeed and expand the heap to its
  // maximum value.
  if (GC_locker::is_active()) {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Garbage collection disabled, "
        "expanded heap instead");
    }
  }

  return success;
}


void DefNewGeneration::compute_new_size() {
  // This is called after a gc that includes the following generation
  // (which is required to exist.)  So from-space will normally be empty.
  // Note that we check both spaces, since if scavenge failed they revert roles.
  // If not, we bail out (otherwise we would have to relocate the objects).
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  int next_level = level() + 1;
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(next_level < gch->_n_gens,
         "DefNewGeneration cannot be an oldest gen");

  Generation* next_gen = gch->_gens[next_level];
  size_t old_size = next_gen->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = spec()->init_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  // Compute desired new generation size based on NewRatio and
  // NewSizeThreadIncrease
  size_t desired_new_size = old_size/NewRatio;
  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
  desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);

  // Adjust new generation size
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
      changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false. If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // bail out of shrinking if there are objects in eden
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
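    // Recompute the eden/from/to boundaries for the new committed size,
    // preserving at least the portion of eden that is currently in use,
    // and resize the card table's covered region to match.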
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    Universe::heap()->barrier_set()->resize_covered_region(cmr);
    if (Verbose && PrintGC) {
      size_t new_size_after  = _virtual_space.committed_size();
      size_t eden_size_after = eden()->capacity();
      size_t survivor_size_after = from()->capacity();
      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
        SIZE_FORMAT "K [eden=" SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, new_size_after/K,
        eden_size_after/K, survivor_size_after/K);
      if (WizardMode) {
        gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
          thread_increase_size/K, threads_count);
      }
      gclog_or_tty->cr();
    }
  }
}

void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
  assert(false, "NYI -- are you sure you want to call this?");
}


size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}


size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used();      // to() is only used during scavenge
}


size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}

size_t DefNewGeneration::max_capacity() const {
  const size_t alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
  const size_t reserved_bytes = reserved().byte_size();
  return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
}

size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}

size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}

size_t DefNewGeneration::contiguous_available() const {
  return eden()->free();
}


HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }

void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}


void DefNewGeneration::space_iterate(SpaceClosure* blk,
                                     bool usedOnly) {
  blk->do_space(eden());
  blk->do_space(from());
  blk->do_space(to());
}

// The last collection bailed out, we are running out of heap space,
// so we try to allocate from the from-space, too.
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
  HeapWord* result = NULL;
  if (Verbose && PrintGCDetails) {
    gclog_or_tty->print("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "):"
                        " will_fail: %s"
                        " heap_lock: %s"
                        " free: " SIZE_FORMAT,
                        size,
                        GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
                          "true" : "false",
                        Heap_lock->is_locked() ? "locked" : "unlocked",
                        from()->free());
  }
  if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) {
    if (Heap_lock->owned_by_self() ||
        (SafepointSynchronize::is_at_safepoint() &&
         Thread::current()->is_VM_thread())) {
      // If the Heap_lock is not locked by this thread, this will be called
      // again later with the Heap_lock held.
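      // Objects allocated here end up in from-space, which is normally
      // empty between scavenges; should_allocate_from_space() is only set
      // (in gc_epilogue() below) once even a full collection has failed to
      // make the young generation usable again.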
      result = from()->allocate(size);
    } else if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  Heap_lock is not owned by self");
    }
  } else if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("  should_allocate_from_space: NOT");
  }
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("  returns %s", result == NULL ? "NULL" : "object");
  }
  return result;
}

HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
                                                bool   is_tlab,
                                                bool   parallel) {
  // We don't attempt to expand the young generation (but perhaps we should.)
  return allocate(size, is_tlab);
}

void DefNewGeneration::adjust_desired_tenuring_threshold() {
  // Set the desired survivor size to half the real survivor space
  _tenuring_threshold =
    age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
}

void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start();
  DefNewTracer gc_tracer;
  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());

  _next_gen = gch->next_gen(this);

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: Collection attempt not safe :: ");
    }
    gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, gc_tracer.gc_id());
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  gch->trace_heap_before_gc(&gc_tracer);

  SpecializationStats::clear();

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
                                      gch->rem_set()->klass_rem_set());
  CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
                                           &fsc_with_no_gc_barrier,
                                           false);

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  gch->gen_process_roots(_level,
                         true,  // Process younger gens, if any,
                                // as strong roots.
                         true,  // activate StrongRootsScope
                         SharedHeap::SO_ScavengeCodeCache,
                         GenCollectedHeap::StrongAndWeakRoots,
                         &fsc_with_no_gc_barrier,
                         &fsc_with_gc_barrier,
                         &cld_scan_closure);

  // "evacuate followers".
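  // Transitively copy everything reachable from the objects evacuated by
  // the root scan above, iterating until no new allocations appear past
  // the save marks.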
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  const ReferenceProcessorStats& stats =
    rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                      NULL, _gc_timer, gc_tracer.gc_id());
  gc_tracer.report_gc_reference_stats(stats);

  if (!_promotion_failed) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s). When a minor collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation. Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
    size_policy->reset_gc_overhead_limit_count();
    assert(!gch->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed) ");
    }
    // Add to-space to the list of spaces to compact
    // when a promotion failure has occurred. In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_failed();

    // Inform the next generation that a promotion failure occurred.
    _next_gen->promotion_failure_occurred();
    gc_tracer.report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  if (PrintGC && !PrintGCDetails) {
    gch->print_heap_change(gch_prev_used);
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());
  SpecializationStats::print();

  // We need to use a monotonically non-decreasing time in ms, or we
  // will see time-warp warnings; os::javaTimeMillis() does not
  // guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  gch->trace_heap_after_gc(&gc_tracer);
  gc_tracer.report_tenuring_threshold(tenuring_threshold());

  _gc_timer->register_gc_end();

  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}

class RemoveForwardPointerClosure: public ObjectClosure {
public:
  void do_object(oop obj) {
    obj->init_mark();
  }
};

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  _promotion_failed_info.reset();
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::remove_forwarding_pointers() {
  RemoveForwardPointerClosure rspc;
  eden()->object_iterate(&rspc);
  from()->object_iterate(&rspc);

  // Now restore saved marks, if any.
  assert(_objs_with_preserved_marks.size() == _preserved_marks_of_objs.size(),
         "should be the same");
  while (!_objs_with_preserved_marks.is_empty()) {
    oop obj   = _objs_with_preserved_marks.pop();
    markOop m = _preserved_marks_of_objs.pop();
    obj->set_mark(m);
  }
  _objs_with_preserved_marks.clear(true);
  _preserved_marks_of_objs.clear(true);
}

void DefNewGeneration::preserve_mark(oop obj, markOop m) {
  assert(_promotion_failed && m->must_be_preserved_for_promotion_failure(obj),
         "Oversaving!");
  _objs_with_preserved_marks.push(obj);
  _preserved_marks_of_objs.push(m);
}

void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    preserve_mark(obj, m);
  }
}

void DefNewGeneration::handle_promotion_failure(oop old) {
  if (PrintPromotionFailure && !_promotion_failed) {
    gclog_or_tty->print(" (promotion failure size = %d) ",
                        old->size());
  }
  _promotion_failed = true;
  _promotion_failed_info.register_copy_failure(old->size());
  preserve_mark_if_necessary(old, old->mark());
  // forward to self
  old->forward_to(old);

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}

oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate_aligned(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _next_gen->promote(old, s);
    if (obj == NULL) {
      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  return obj;
}

void DefNewGeneration::drain_promo_failure_scan_stack() {
  while (!_promo_failure_scan_stack.is_empty()) {
    oop obj = _promo_failure_scan_stack.pop();
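    // Rescan the object that failed to be promoted; the scan-stack
    // closure evacuates any young-generation objects it still references.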
    obj->oop_iterate<false>(_promo_failure_scan_stack_closure);
  }
}

void DefNewGeneration::save_marks() {
  eden()->set_saved_mark();
  to()->set_saved_mark();
  from()->set_saved_mark();
}


void DefNewGeneration::reset_saved_marks() {
  eden()->reset_saved_mark();
  to()->reset_saved_mark();
  from()->reset_saved_mark();
}


bool DefNewGeneration::no_allocs_since_save_marks() {
  assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
  assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
  return to()->saved_mark_at_top();
}

void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                          size_t max_alloc_words) {
  if (requestor == this || _promotion_failed) return;
  assert(requestor->level() > level(), "DefNewGeneration must be youngest");

  /* $$$ Assert this?  "trace" is a "MarkSweep" function so that's not appropriate.
  if (to_space->top() > to_space->bottom()) {
    trace("to_space not empty when contribute_scratch called");
  }
  */

  ContiguousSpace* to_space = to();
  assert(to_space->end() >= to_space->top(), "pointers out of order");
  size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    ScratchBlock* sb = (ScratchBlock*)to_space->top();
    sb->num_words = free_words;
    sb->next = list;
    list = sb;
  }
}

void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of
  // to_space if ZapUnusedHeapArea.  This is needed because
  // top is not maintained while using to-space as scratch.
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area_complete();
  }
}

bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: to is not empty :: ");
    }
    return false;
  }
  if (_next_gen == NULL) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    _next_gen = gch->next_gen(this);
  }
  return _next_gen->promotion_attempt_is_safe(used());
}

void DefNewGeneration::gc_epilogue(bool full) {
  DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)

  assert(!GC_locker::is_active(), "We should not be executing here");
  // Check if the heap is approaching full after a collection has
  // been done. Generally the young generation is empty at
  // a minimum at the end of a collection. If it is not, then
  // the heap is approaching full.
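  // In that case, record the condition so that subsequent allocations may
  // fall back to from-space (see allocate_from_space() above).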
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (full) {
    DEBUG_ONLY(seen_incremental_collection_failed = false;)
    if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      }
      gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
      set_should_allocate_from_space(); // we seem to be running out of space
    } else {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      }
      gch->clear_incremental_collection_failed(); // We just did a full collection
      clear_should_allocate_from_space(); // if set
    }
  } else {
#ifdef ASSERT
    // It is possible that incremental_collection_failed() == true
    // here, because an attempted scavenge did not succeed. The policy
    // is normally expected to cause a full collection which should
    // clear that condition, so we should not be here twice in a row
    // with incremental_collection_failed() == true without having done
    // a full collection in between.
    if (!seen_incremental_collection_failed &&
        gch->incremental_collection_failed()) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
                            GCCause::to_string(gch->gc_cause()));
      }
      seen_incremental_collection_failed = true;
    } else if (seen_incremental_collection_failed) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
                            GCCause::to_string(gch->gc_cause()));
      }
      assert(gch->gc_cause() == GCCause::_scavenge_alot ||
             (gch->gc_cause() == GCCause::_java_lang_system_gc && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
             !gch->incremental_collection_failed(),
             "Twice in a row");
      seen_incremental_collection_failed = false;
    }
#endif // ASSERT
  }

  if (ZapUnusedHeapArea) {
    eden()->check_mangled_unused_area_complete();
    from()->check_mangled_unused_area_complete();
    to()->check_mangled_unused_area_complete();
  }

  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }

  // update the generation and space performance counters
  update_counters();
  gch->collector_policy()->counters()->update_counters();
}

void DefNewGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  eden()->set_top_for_allocations();
  to()->set_top_for_allocations();
  from()->set_top_for_allocations();
}

void DefNewGeneration::ref_processor_init() {
  Generation::ref_processor_init();
}


void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}

void DefNewGeneration::verify() {
  eden()->verify();
  from()->verify();
  to()->verify();
}

void DefNewGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print("  eden");
  eden()->print_on(st);
  st->print("  from");
  from()->print_on(st);
  st->print("  to  ");
  to()->print_on(st);
}

const char* DefNewGeneration::name() const {
  return "def new generation";
}

// Moved from inline file as they are not called inline
CompactibleSpace* DefNewGeneration::first_compaction_space() const {
  return eden();
}

HeapWord* DefNewGeneration::allocate(size_t word_size,
                                     bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden.  If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result != NULL) {
    if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
      _next_gen->sample_eden_chunk();
    }
    return result;
  }
  do {
    HeapWord* old_limit = eden()->soft_end();
    if (old_limit < eden()->end()) {
      // Tell the next generation we reached a limit.
      HeapWord* new_limit =
        next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
      if (new_limit != NULL) {
        Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
      } else {
        assert(eden()->soft_end() == eden()->end(),
               "invalid state after allocation_limit_reached returned null");
      }
    } else {
      // The allocation failed and the soft limit is equal to the hard limit,
      // so there is no reason to retry the allocation.
      assert(old_limit == eden()->end(), "sanity check");
      break;
    }
    // Retry the allocation until it succeeds or the soft limit can no
    // longer be adjusted.
    result = eden()->par_allocate(word_size);
  } while (result == NULL);

  // If the eden is full and the last collection bailed out, we are running
  // out of heap space, and we try to allocate the from-space, too.
  // allocate_from_space can't be inlined because that would introduce a
  // circular dependency at compile time.
  if (result == NULL) {
    result = allocate_from_space(word_size);
  } else if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
    _next_gen->sample_eden_chunk();
  }
  return result;
}

HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                         bool is_tlab) {
  HeapWord* res = eden()->par_allocate(word_size);
  if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
    _next_gen->sample_eden_chunk();
  }
  return res;
}

void DefNewGeneration::gc_prologue(bool full) {
  // Ensure that _end and _soft_end are the same in eden space.
  eden()->set_soft_end(eden()->end());
}

size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}

size_t DefNewGeneration::tlab_used() const {
  return eden()->used();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}