/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/defNewGeneration.inline.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/genRemSet.hpp"
#include "memory/generationSpec.hpp"
#include "memory/iterator.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/space.inline.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

//
// DefNewGeneration functions.

// Methods of protected closure types.
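//
// Orientation (added commentary): IsAliveClosure answers liveness queries
// during a scavenge -- an object outside this generation, or one that has
// already been forwarded, counts as alive. The KeepAlive closures keep
// weakly reachable objects alive and use the CardTableRS so that any
// old-to-young pointers they process remain covered by the remembered set.
// The EvacuateFollowers closures drain the transitive closure of copied
// objects; see defNewGeneration.inline.hpp for the do_oop_work() bodies.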

DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) { }
bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
  return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
}

DefNewGeneration::KeepAliveClosure::
KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
  GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
  _rs = (CardTableRS*)rs;
}

void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }


DefNewGeneration::FastKeepAliveClosure::
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {
  _boundary = g->reserved().end();
}

void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }

DefNewGeneration::EvacuateFollowersClosure::
EvacuateFollowersClosure(GenCollectedHeap* gch,
                         ScanClosure* cur, ScanClosure* older) :
  _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::EvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(Generation::Young, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(Generation::Young));
}

DefNewGeneration::FastEvacuateFollowersClosure::
FastEvacuateFollowersClosure(GenCollectedHeap* gch,
                             DefNewGeneration* gen,
                             FastScanClosure* cur, FastScanClosure* older) :
  _gch(gch), _gen(gen), _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(Generation::Young, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(Generation::Young));
  guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
}

ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
    OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  _boundary = _g->reserved().end();
}

void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }

FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
    OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  _boundary = _g->reserved().end();
}

void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }

void KlassScanClosure::do_klass(Klass* klass) {
#ifndef PRODUCT
  if (TraceScavenge) {
    ResourceMark rm;
    gclog_or_tty->print_cr("KlassScanClosure::do_klass " PTR_FORMAT ", %s, dirty: %s",
                           klass,
                           klass->external_name(),
                           klass->has_modified_oops() ? "true" : "false");
  }
#endif

  // If the klass has not been dirtied we know that there are
  // no references into the young gen and we can skip it.
  if (klass->has_modified_oops()) {
    if (_accumulate_modified_oops) {
      klass->accumulate_modified_oops();
    }

    // Clear this state since we're going to scavenge all the metadata.
    klass->clear_modified_oops();

    // Tell the closure which Klass is being scanned so that it can be dirtied
    // if oops are left pointing into the young gen.
    _scavenge_closure->set_scanned_klass(klass);

    klass->oops_do(_scavenge_closure);

    _scavenge_closure->set_scanned_klass(NULL);
  }
}

ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
  _g(g)
{
  _boundary = _g->reserved().end();
}

void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }

void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }

KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
                                   KlassRemSet* klass_rem_set)
    : _scavenge_closure(scavenge_closure),
      _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}


DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   const char* policy)
  : Generation(rs, initial_size),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
    _eden_space = new ConcEdenSpace(this);
  } else {
    _eden_space = new EdenSpace(this);
  }
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
    vm_exit_during_initialization("Could not allocate a new gen space");

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
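  // A worked example (added; assumes compute_survivor_size() divides the
  // generation size by SurvivorRatio + 2 and aligns down, as defined in
  // defNewGeneration.hpp): a 64M reservation with the default
  // SurvivorRatio = 8 yields two ~6.4M survivor spaces and a ~51.2M
  // maximum eden.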
  uintx alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, alignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters
  GenCollectorPolicy* gcp = (GenCollectorPolicy*) GenCollectedHeap::heap()->collector_policy();

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3,
      gcp->min_young_size(), gcp->max_young_size(), &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _old_gen = NULL;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;

  _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
}

void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  uintx alignment =
    GenCollectedHeap::heap()->collector_policy()->space_alignment();

  // If the spaces are being cleared (only done at heap initialization
  // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces,
  // so check that they are empty.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
         "Initialization of the survivor spaces assumes these are empty");

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, alignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding, if so adjust eden size back up
    minimum_eden_size = align_size_up(minimum_eden_size, alignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_size_down(maximum_survivor_size, alignment);
    survivor_size = MAX2(unaligned_survivor_size, alignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start, (HeapWord*)to_end);

  // A minimum eden size implies that there is a part of eden that
  // is being used and that affects the initialization of any
  // newly formed eden.
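  // (Added note: the boundaries computed above carve the committed region
  // contiguously into [ eden | from | to ]; live_in_eden below records
  // whether part of eden must be treated as already in use.)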
  bool live_in_eden = minimum_eden_size > 0;

  // If not clearing the spaces, do some checking to verify that
  // the spaces are already mangled.
  if (!clear_space) {
    // Must check mangling before the spaces are reshaped.  Otherwise,
    // the bottom or end of one space may have moved into another space,
    // and a failure of the check may not correctly indicate which space
    // is not properly mangled.
    if (ZapUnusedHeapArea) {
      HeapWord* limit = (HeapWord*) _virtual_space.high();
      eden()->check_mangled_unused_area(limit);
      from()->check_mangled_unused_area(limit);
        to()->check_mangled_unused_area(limit);
    }
  }

  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
  // If clear_space and live_in_eden, we will not have cleared any
  // portion of eden above its top. This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, clear_space, mangle_space);
    to()->initialize(toMR, clear_space, mangle_space);

  // Set next compaction spaces.
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space        = to();
  _to_space          = s;
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}

bool DefNewGeneration::expand(size_t bytes) {
  MutexLocker x(ExpandHeap_lock);
  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
  bool success = _virtual_space.expand_by(bytes);
  if (success && ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    HeapWord* new_high = (HeapWord*) _virtual_space.high();
    MemRegion mangle_region(prev_high, new_high);
    SpaceMangler::mangle_region(mangle_region);
  }

  // Do not attempt an expand to the reserve size.  The
  // request should properly observe the maximum size of
  // the generation so an expand-to-reserve should be
  // unnecessary.  Also, a second expand-to-reserve call
  // could potentially cause an undue expansion: for
  // example, if the first expand fails for unknown reasons,
  // the second could succeed and expand the heap to its
  // maximum value.
  if (GC_locker::is_active()) {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Garbage collection disabled, "
        "expanded heap instead");
    }
  }

  return success;
}


void DefNewGeneration::compute_new_size() {
  // This is called after a gc that includes the following generation
  // (which is required to exist.)  So from-space will normally be empty.
  // Note that we check both spaces, since if scavenge failed they revert roles.
  // If not we bail out (otherwise we would have to relocate the objects)
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  size_t old_size = gch->old_gen()->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = spec()->init_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  // Compute desired new generation size based on NewRatio and
  // NewSizeThreadIncrease
  size_t desired_new_size = old_size/NewRatio;
  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
  desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);

  // Adjust new generation size
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
       changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false.  If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // bail out of shrinking if objects in eden
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
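    // (Added note: passing eden()->used() as the minimum eden size keeps
    // the already-used portion of eden inside the recomputed eden space.)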
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    Universe::heap()->barrier_set()->resize_covered_region(cmr);
    if (Verbose && PrintGC) {
      size_t new_size_after  = _virtual_space.committed_size();
      size_t eden_size_after = eden()->capacity();
      size_t survivor_size_after = from()->capacity();
      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
        SIZE_FORMAT "K [eden="
        SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, new_size_after/K,
        eden_size_after/K, survivor_size_after/K);
      if (WizardMode) {
        gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
          thread_increase_size/K, threads_count);
      }
      gclog_or_tty->cr();
    }
  }
}

void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
  assert(false, "NYI -- are you sure you want to call this?");
}


size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}


size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used();      // to() is only used during scavenge
}


size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}

size_t DefNewGeneration::max_capacity() const {
  const size_t alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
  const size_t reserved_bytes = reserved().byte_size();
  return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
}

size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}

size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}

size_t DefNewGeneration::contiguous_available() const {
  return eden()->free();
}


HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }

void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}


void DefNewGeneration::space_iterate(SpaceClosure* blk,
                                     bool usedOnly) {
  blk->do_space(eden());
  blk->do_space(from());
  blk->do_space(to());
}

// The last collection bailed out; we are running out of heap space,
// so we try to allocate from the from-space, too.
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
  HeapWord* result = NULL;
  if (Verbose && PrintGCDetails) {
    gclog_or_tty->print("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "):"
                        " will_fail: %s"
                        " heap_lock: %s"
                        " free: " SIZE_FORMAT,
                        size,
                        GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
                          "true" : "false",
                        Heap_lock->is_locked() ? "locked" : "unlocked",
                        from()->free());
  }
  if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) {
    if (Heap_lock->owned_by_self() ||
        (SafepointSynchronize::is_at_safepoint() &&
         Thread::current()->is_VM_thread())) {
      // If the Heap_lock is not locked by this thread, this will be called
      // again later with the Heap_lock held.
      result = from()->allocate(size);
    } else if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  Heap_lock is not owned by self");
    }
  } else if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("  should_allocate_from_space: NOT");
  }
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("  returns %s", result == NULL ? "NULL" : "object");
  }
  return result;
}

HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
                                                bool   is_tlab,
                                                bool   parallel) {
  // We don't attempt to expand the young generation (but perhaps we should.)
  return allocate(size, is_tlab);
}

void DefNewGeneration::adjust_desired_tenuring_threshold() {
  // Set the desired survivor size to half the real survivor space
  _tenuring_threshold =
    age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
}

void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start();
  DefNewTracer gc_tracer;
  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());

  _old_gen = gch->old_gen();

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: Collection attempt not safe :: ");
    }
    gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, gc_tracer.gc_id());
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  gch->trace_heap_before_gc(&gc_tracer);

  SpecializationStats::clear();

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(Generation::Young),
         "save marks have not been newly set.");

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
                                      gch->rem_set()->klass_rem_set());
  CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
                                           &fsc_with_no_gc_barrier,
                                           false);

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch, this,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(Generation::Young),
         "save marks have not been newly set.");

  gch->gen_process_roots(Generation::Young,
                         true,  // Process younger gens, if any,
                                // as strong roots.
                         true,  // activate StrongRootsScope
                         SharedHeap::SO_ScavengeCodeCache,
                         GenCollectedHeap::StrongAndWeakRoots,
                         &fsc_with_no_gc_barrier,
                         &fsc_with_gc_barrier,
                         &cld_scan_closure);

  // "evacuate followers".
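  // Draining the followers is the breadth-first phase of the copying
  // collection: the root scan above copied objects directly reachable from
  // roots, and this loop rescans everything allocated since the last
  // save_marks() -- in to-space and in the old generation -- until no new
  // copies appear.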
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  const ReferenceProcessorStats& stats =
    rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                      NULL, _gc_timer, gc_tracer.gc_id());
  gc_tracer.report_gc_reference_stats(stats);

  if (!_promotion_failed) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a minor collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
    size_policy->reset_gc_overhead_limit_count();
    assert(!gch->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed) ");
    }
    // Add to-space to the list of spaces to compact
    // when a promotion failure has occurred.  In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_failed();

    // Inform the next generation that a promotion failure occurred.
    _old_gen->promotion_failure_occurred();
    gc_tracer.report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  if (PrintGC && !PrintGCDetails) {
    gch->print_heap_change(gch_prev_used);
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());
  SpecializationStats::print();

  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings, and os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  gch->trace_heap_after_gc(&gc_tracer);
  gc_tracer.report_tenuring_threshold(tenuring_threshold());

  _gc_timer->register_gc_end();

  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}

class RemoveForwardPointerClosure: public ObjectClosure {
public:
  void do_object(oop obj) {
    obj->init_mark();
  }
};

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  _promotion_failed_info.reset();
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::remove_forwarding_pointers() {
  RemoveForwardPointerClosure rspc;
  eden()->object_iterate(&rspc);
  from()->object_iterate(&rspc);

  // Now restore saved marks, if any.
  assert(_objs_with_preserved_marks.size() == _preserved_marks_of_objs.size(),
         "should be the same");
  while (!_objs_with_preserved_marks.is_empty()) {
    oop obj   = _objs_with_preserved_marks.pop();
    markOop m = _preserved_marks_of_objs.pop();
    obj->set_mark(m);
  }
  _objs_with_preserved_marks.clear(true);
  _preserved_marks_of_objs.clear(true);
}

void DefNewGeneration::preserve_mark(oop obj, markOop m) {
  assert(_promotion_failed && m->must_be_preserved_for_promotion_failure(obj),
         "Oversaving!");
  _objs_with_preserved_marks.push(obj);
  _preserved_marks_of_objs.push(m);
}

void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    preserve_mark(obj, m);
  }
}

void DefNewGeneration::handle_promotion_failure(oop old) {
  if (PrintPromotionFailure && !_promotion_failed) {
    gclog_or_tty->print(" (promotion failure size = %d) ",
                        old->size());
  }
  _promotion_failed = true;
  _promotion_failed_info.register_copy_failure(old->size());
  preserve_mark_if_necessary(old, old->mark());
  // forward to self
  old->forward_to(old);

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}

oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate_aligned(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _old_gen->promote(old, s);
    if (obj == NULL) {
      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  return obj;
}

void DefNewGeneration::drain_promo_failure_scan_stack() {
  while (!_promo_failure_scan_stack.is_empty()) {
    oop obj = _promo_failure_scan_stack.pop();
    obj->oop_iterate(_promo_failure_scan_stack_closure);
  }
}

void DefNewGeneration::save_marks() {
  eden()->set_saved_mark();
  to()->set_saved_mark();
  from()->set_saved_mark();
}


void DefNewGeneration::reset_saved_marks() {
  eden()->reset_saved_mark();
  to()->reset_saved_mark();
  from()->reset_saved_mark();
}


bool DefNewGeneration::no_allocs_since_save_marks() {
  assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
  assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
  return to()->saved_mark_at_top();
}

#define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
                                                                \
void DefNewGeneration::                                         \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
  cl->set_generation(this);                                     \
  eden()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  to()->oop_since_save_marks_iterate##nv_suffix(cl);            \
  from()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  cl->reset_generation();                                       \
  save_marks();                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)

#undef DefNew_SINCE_SAVE_MARKS_DEFN

void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                         size_t max_alloc_words) {
  if (requestor == this || _promotion_failed) return;
  assert(requestor == GenCollectedHeap::heap()->old_gen(), "We should not call our own generation");

  /* $$$ Assert this?  "trace" is a "MarkSweep" function so that's not appropriate.
  if (to_space->top() > to_space->bottom()) {
    trace("to_space not empty when contribute_scratch called");
  }
  */

  ContiguousSpace* to_space = to();
  assert(to_space->end() >= to_space->top(), "pointers out of order");
  size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    ScratchBlock* sb = (ScratchBlock*)to_space->top();
    sb->num_words = free_words;
    sb->next = list;
    list = sb;
  }
}

void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of
  // to_space if ZapUnusedHeapArea.  This is needed because
  // top is not maintained while using to-space as scratch.
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area_complete();
  }
}

bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: to is not empty :: ");
    }
    return false;
  }
  if (_old_gen == NULL) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    _old_gen = gch->old_gen();
  }
  return _old_gen->promotion_attempt_is_safe(used());
}

void DefNewGeneration::gc_epilogue(bool full) {
  DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)

  assert(!GC_locker::is_active(), "We should not be executing here");
  // Check if the heap is approaching full after a collection has
  // been done.  Generally the young generation is empty at
  // a minimum at the end of a collection.  If it is not, then
  // the heap is approaching full.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (full) {
    DEBUG_ONLY(seen_incremental_collection_failed = false;)
    if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      }
      gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
      set_should_allocate_from_space(); // we seem to be running out of space
    } else {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      }
      gch->clear_incremental_collection_failed(); // We just did a full collection
      clear_should_allocate_from_space(); // if set
    }
  } else {
#ifdef ASSERT
    // It is possible that incremental_collection_failed() == true
    // here, because an attempted scavenge did not succeed. The policy
    // is normally expected to cause a full collection which should
    // clear that condition, so we should not be here twice in a row
    // with incremental_collection_failed() == true without having done
    // a full collection in between.
    if (!seen_incremental_collection_failed &&
        gch->incremental_collection_failed()) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
                            GCCause::to_string(gch->gc_cause()));
      }
      seen_incremental_collection_failed = true;
    } else if (seen_incremental_collection_failed) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
                            GCCause::to_string(gch->gc_cause()));
      }
      assert(gch->gc_cause() == GCCause::_scavenge_alot ||
             (gch->gc_cause() == GCCause::_java_lang_system_gc && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
             !gch->incremental_collection_failed(),
             "Twice in a row");
      seen_incremental_collection_failed = false;
    }
#endif // ASSERT
  }

  if (ZapUnusedHeapArea) {
    eden()->check_mangled_unused_area_complete();
    from()->check_mangled_unused_area_complete();
    to()->check_mangled_unused_area_complete();
  }

  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }

  // update the generation and space performance counters
  update_counters();
  gch->collector_policy()->counters()->update_counters();
}

void DefNewGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  eden()->set_top_for_allocations();
  to()->set_top_for_allocations();
  from()->set_top_for_allocations();
}

void DefNewGeneration::ref_processor_init() {
  Generation::ref_processor_init();
}


void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}

void DefNewGeneration::verify() {
  eden()->verify();
  from()->verify();
    to()->verify();
}

void DefNewGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print("  eden");
  eden()->print_on(st);
  st->print("  from");
  from()->print_on(st);
  st->print("  to  ");
  to()->print_on(st);
}

const char* DefNewGeneration::name() const {
  return "def new generation";
}

// Moved from inline file as they are not called inline
CompactibleSpace* DefNewGeneration::first_compaction_space() const {
  return eden();
}

HeapWord* DefNewGeneration::allocate(size_t word_size, bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden.  If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result != NULL) {
    if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
      _old_gen->sample_eden_chunk();
    }
    return result;
  }
  do {
    HeapWord* old_limit = eden()->soft_end();
    if (old_limit < eden()->end()) {
      // Tell the old generation we reached a limit.
      HeapWord* new_limit =
        _old_gen->allocation_limit_reached(eden(), eden()->top(), word_size);
      if (new_limit != NULL) {
        Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
      } else {
        assert(eden()->soft_end() == eden()->end(),
               "invalid state after allocation_limit_reached returned null");
      }
    } else {
      // The allocation failed and the soft limit is equal to the hard
      // limit, so there is no reason to attempt another allocation.
      assert(old_limit == eden()->end(), "sanity check");
      break;
    }
    // Try to allocate until it succeeds or the soft limit can't be adjusted
    result = eden()->par_allocate(word_size);
  } while (result == NULL);

  // If the eden is full and the last collection bailed out, we are running
  // out of heap space, and we try to allocate from the from-space, too.
  // allocate_from_space can't be inlined because that would introduce a
  // circular dependency at compile time.
  if (result == NULL) {
    result = allocate_from_space(word_size);
  } else if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
    _old_gen->sample_eden_chunk();
  }
  return result;
}

HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                         bool is_tlab) {
  HeapWord* res = eden()->par_allocate(word_size);
  if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
    _old_gen->sample_eden_chunk();
  }
  return res;
}

void DefNewGeneration::gc_prologue(bool full) {
  // Ensure that _end and _soft_end are the same in eden space.
  eden()->set_soft_end(eden()->end());
}

size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}

size_t DefNewGeneration::tlab_used() const {
  return eden()->used();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}