/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/serial/defNewGeneration.inline.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/cms/parOopClosures.hpp"
#endif

//
// DefNewGeneration functions.

// Methods of protected closure types.
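// The young generation managed here consists of an eden space and two
// survivor spaces (from-space and to-space). A scavenge copies live objects
// into to-space, or promotes them to the old generation once they reach the
// tenuring threshold, and then swaps the survivor spaces. The closure types
// below drive that copying traversal over roots, remembered-set entries and
// discovered references.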

DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* young_gen) : _young_gen(young_gen) {
  assert(_young_gen->kind() == Generation::ParNew ||
         _young_gen->kind() == Generation::DefNew, "Expected the young generation here");
}

bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
  return (HeapWord*)p >= _young_gen->reserved().end() || p->is_forwarded();
}

DefNewGeneration::KeepAliveClosure::
KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
  _rs = GenCollectedHeap::heap()->rem_set();
}

void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }


DefNewGeneration::FastKeepAliveClosure::
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {
  _boundary = g->reserved().end();
}

void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }

DefNewGeneration::EvacuateFollowersClosure::
EvacuateFollowersClosure(GenCollectedHeap* gch,
                         ScanClosure* cur,
                         ScanClosure* older) :
  _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::EvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen, _scan_cur_or_nonheap, _scan_older);
  } while (!_gch->no_allocs_since_save_marks());
}

DefNewGeneration::FastEvacuateFollowersClosure::
FastEvacuateFollowersClosure(GenCollectedHeap* gch,
                             FastScanClosure* cur,
                             FastScanClosure* older) :
  _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
{
  assert(_gch->young_gen()->kind() == Generation::DefNew, "Generation should be DefNew");
  _young_gen = (DefNewGeneration*)_gch->young_gen();
}

void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen, _scan_cur_or_nonheap, _scan_older);
  } while (!_gch->no_allocs_since_save_marks());
  guarantee(_young_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
}

ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
    OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  _boundary = _g->reserved().end();
}

void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }

FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
    OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  _boundary = _g->reserved().end();
}

void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }

void KlassScanClosure::do_klass(Klass* klass) {
#ifndef PRODUCT
  ResourceMark rm;
  log_develop(gc, scavenge)("KlassScanClosure::do_klass " PTR_FORMAT ", %s, dirty: %s",
                            p2i(klass),
                            klass->external_name(),
                            klass->has_modified_oops() ? "true" : "false");
#endif

  // If the klass has not been dirtied we know that there's
  // no references into the young gen and we can skip it.
  if (klass->has_modified_oops()) {
    if (_accumulate_modified_oops) {
      klass->accumulate_modified_oops();
    }

    // Clear this state since we're going to scavenge all the metadata.
    klass->clear_modified_oops();

    // Tell the closure which Klass is being scanned so that it can be dirtied
    // if oops are left pointing into the young gen.
    _scavenge_closure->set_scanned_klass(klass);

    klass->oops_do(_scavenge_closure);

    _scavenge_closure->set_scanned_klass(NULL);
  }
}

ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
  _g(g)
{
  _boundary = _g->reserved().end();
}

void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }

void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }

KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
                                   KlassRemSet* klass_rem_set)
    : _scavenge_closure(scavenge_closure),
      _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}


DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   const char* policy)
  : Generation(rs, initial_size),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  gch->barrier_set()->resize_covered_region(cmr);

  _eden_space = new ContiguousSpace();
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
    vm_exit_during_initialization("Could not allocate a new gen space");
  }

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx alignment = gch->collector_policy()->space_alignment();
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, alignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters
  GenCollectorPolicy* gcp = gch->gen_policy();

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3,
      gcp->min_young_size(), gcp->max_young_size(), &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _old_gen = NULL;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;

  _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
}

void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  uintx alignment =
    GenCollectedHeap::heap()->collector_policy()->space_alignment();

  // If the spaces are being cleared (only done at heap initialization
  // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces
  // so check.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
         "Initialization of the survivor spaces assumes these are empty");

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, alignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding, if so adjust eden size back up
    minimum_eden_size = align_size_up(minimum_eden_size, alignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_size_down(maximum_survivor_size, alignment);
    survivor_size = MAX2(unaligned_survivor_size, alignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start, (HeapWord*)to_end);

  // A minimum eden size implies that there is a part of eden that
  // is being used and that affects the initialization of any
  // newly formed eden.
  bool live_in_eden = minimum_eden_size > 0;

  // If not clearing the spaces, do some checking to verify that
  // the spaces are already mangled.
  if (!clear_space) {
    // Must check mangling before the spaces are reshaped. Otherwise,
    // the bottom or end of one space may have moved into another space,
    // and a failure of the check may not correctly indicate which space
    // is not properly mangled.
    if (ZapUnusedHeapArea) {
      HeapWord* limit = (HeapWord*) _virtual_space.high();
      eden()->check_mangled_unused_area(limit);
      from()->check_mangled_unused_area(limit);
      to()->check_mangled_unused_area(limit);
    }
  }

  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
  // If clear_space and live_in_eden, we will not have cleared any
  // portion of eden above its top. This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, clear_space, mangle_space);
  to()->initialize(toMR, clear_space, mangle_space);

  // Set next compaction spaces.
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered. The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space       = to();
  _to_space         = s;
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered. The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}

bool DefNewGeneration::expand(size_t bytes) {
  MutexLocker x(ExpandHeap_lock);
  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
  bool success = _virtual_space.expand_by(bytes);
  if (success && ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    HeapWord* new_high = (HeapWord*) _virtual_space.high();
    MemRegion mangle_region(prev_high, new_high);
    SpaceMangler::mangle_region(mangle_region);
  }

  // Do not attempt an expand-to-the-reserve size. The
  // request should properly observe the maximum size of
  // the generation so an expand-to-reserve should be
  // unnecessary. Also a second call to expand-to-reserve
  // value potentially can cause an undue expansion.
  // For example if the first expand fails for unknown reasons,
  // but the second succeeds and expands the heap to its maximum
  // value.
  if (GC_locker::is_active()) {
    log_debug(gc)("Garbage collection disabled, expanded heap instead");
  }

  return success;
}

void DefNewGeneration::compute_new_size() {
  // This is called after a GC that includes the old generation, so from-space
  // will normally be empty.
  // Note that we check both spaces, since if scavenge failed they revert roles.
  // If not we bail out (otherwise we would have to relocate the objects).
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  size_t old_size = gch->old_gen()->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = initial_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  // Compute desired new generation size based on NewRatio and
  // NewSizeThreadIncrease
  size_t desired_new_size = old_size/NewRatio;
  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
  desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);

  // Adjust new generation size
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
      changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false. If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // bail out of shrinking if objects in eden
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    gch->barrier_set()->resize_covered_region(cmr);

    log_debug(gc, heap, ergo)(
        "New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden=" SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, _virtual_space.committed_size()/K,
        eden()->capacity()/K, from()->capacity()/K);
    log_trace(gc, heap, ergo)(
        " [allowed " SIZE_FORMAT "K extra for %d threads]",
        thread_increase_size/K, threads_count);
  }
}

void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl, uint n_threads) {
  assert(false, "NYI -- are you sure you want to call this?");
}


size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}


size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used();      // to() is only used during scavenge
}


size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}

size_t DefNewGeneration::max_capacity() const {
  const size_t alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
  const size_t reserved_bytes = reserved().byte_size();
  return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
}

size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}

size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}

size_t DefNewGeneration::contiguous_available() const {
  return eden()->free();
}


HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }

void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}


void DefNewGeneration::space_iterate(SpaceClosure* blk,
                                     bool usedOnly) {
  blk->do_space(eden());
  blk->do_space(from());
  blk->do_space(to());
}

// The last collection bailed out, we are running out of heap space,
// so we try to allocate the from-space, too.
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
  bool should_try_alloc = should_allocate_from_space() || GC_locker::is_active_and_needs_gc();

  // If the Heap_lock is not locked by this thread, this will be called
  // again later with the Heap_lock held.
  bool do_alloc = should_try_alloc && (Heap_lock->owned_by_self() || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()));

  HeapWord* result = NULL;
  if (do_alloc) {
    result = from()->allocate(size);
  }

  log_trace(gc, alloc)("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "): will_fail: %s heap_lock: %s free: " SIZE_FORMAT "%s%s returns %s",
                       size,
                       GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
                         "true" : "false",
                       Heap_lock->is_locked() ? "locked" : "unlocked",
                       from()->free(),
                       should_try_alloc ? "" : " should_allocate_from_space: NOT",
                       do_alloc ? " Heap_lock is not owned by self" : "",
                       result == NULL ?
"NULL" : "object"); 521 522 return result; 523 } 524 525 HeapWord* DefNewGeneration::expand_and_allocate(size_t size, 526 bool is_tlab, 527 bool parallel) { 528 // We don't attempt to expand the young generation (but perhaps we should.) 529 return allocate(size, is_tlab); 530 } 531 532 void DefNewGeneration::adjust_desired_tenuring_threshold() { 533 // Set the desired survivor size to half the real survivor space 534 GCPolicyCounters* gc_counters = GenCollectedHeap::heap()->collector_policy()->counters(); 535 _tenuring_threshold = 536 age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize, gc_counters); 537 } 538 539 void DefNewGeneration::collect(bool full, 540 bool clear_all_soft_refs, 541 size_t size, 542 bool is_tlab) { 543 assert(full || size > 0, "otherwise we don't want to collect"); 544 545 GenCollectedHeap* gch = GenCollectedHeap::heap(); 546 547 _gc_timer->register_gc_start(); 548 DefNewTracer gc_tracer; 549 gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start()); 550 551 _old_gen = gch->old_gen(); 552 553 // If the next generation is too full to accommodate promotion 554 // from this generation, pass on collection; let the next generation 555 // do it. 556 if (!collection_attempt_is_safe()) { 557 log_trace(gc)(":: Collection attempt not safe ::"); 558 gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one 559 return; 560 } 561 assert(to()->is_empty(), "Else not collection_attempt_is_safe"); 562 563 init_assuming_no_promotion_failure(); 564 565 GCTraceTime(Trace, gc) tm("DefNew", NULL, gch->gc_cause()); 566 567 gch->trace_heap_before_gc(&gc_tracer); 568 569 // These can be shared for all code paths 570 IsAliveClosure is_alive(this); 571 ScanWeakRefClosure scan_weak_ref(this); 572 573 age_table()->clear(); 574 to()->clear(SpaceDecorator::Mangle); 575 576 gch->rem_set()->prepare_for_younger_refs_iterate(false); 577 578 assert(gch->no_allocs_since_save_marks(), 579 "save marks have not been newly set."); 580 581 // Not very pretty. 582 CollectorPolicy* cp = gch->collector_policy(); 583 584 FastScanClosure fsc_with_no_gc_barrier(this, false); 585 FastScanClosure fsc_with_gc_barrier(this, true); 586 587 KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier, 588 gch->rem_set()->klass_rem_set()); 589 CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure, 590 &fsc_with_no_gc_barrier, 591 false); 592 593 set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier); 594 FastEvacuateFollowersClosure evacuate_followers(gch, 595 &fsc_with_no_gc_barrier, 596 &fsc_with_gc_barrier); 597 598 assert(gch->no_allocs_since_save_marks(), 599 "save marks have not been newly set."); 600 601 { 602 // DefNew needs to run with n_threads == 0, to make sure the serial 603 // version of the card table scanning code is used. 604 // See: CardTableModRefBSForCTRS::non_clean_card_iterate_possibly_parallel. 605 StrongRootsScope srs(0); 606 607 gch->gen_process_roots(&srs, 608 GenCollectedHeap::YoungGen, 609 true, // Process younger gens, if any, 610 // as strong roots. 611 GenCollectedHeap::SO_ScavengeCodeCache, 612 GenCollectedHeap::StrongAndWeakRoots, 613 &fsc_with_no_gc_barrier, 614 &fsc_with_gc_barrier, 615 &cld_scan_closure); 616 } 617 618 // "evacuate followers". 
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  const ReferenceProcessorStats& stats =
    rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                      NULL, _gc_timer);
  gc_tracer.report_gc_reference_stats(stats);
  gc_tracer.report_tenuring_threshold(tenuring_threshold());

  if (!_promotion_failed) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s). When a young collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation. Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
    size_policy->reset_gc_overhead_limit_count();
    assert(!gch->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    log_debug(gc)("Promotion failed");
    // Add to-space to the list of spaces to compact
    // when a promotion failure has occurred. In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_failed();

    // Inform the next generation that a promotion failure occurred.
    _old_gen->promotion_failure_occurred();
    gc_tracer.report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(gch->reset_promotion_should_fail();)
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());

  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings and os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  gch->trace_heap_after_gc(&gc_tracer);

  _gc_timer->register_gc_end();

  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}

class RemoveForwardPointerClosure: public ObjectClosure {
public:
  void do_object(oop obj) {
    obj->init_mark();
  }
};

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  _promotion_failed_info.reset();
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::remove_forwarding_pointers() {
  RemoveForwardPointerClosure rspc;
  eden()->object_iterate(&rspc);
  from()->object_iterate(&rspc);

  // Now restore saved marks, if any.
  assert(_objs_with_preserved_marks.size() == _preserved_marks_of_objs.size(),
         "should be the same");
  while (!_objs_with_preserved_marks.is_empty()) {
    oop obj   = _objs_with_preserved_marks.pop();
    markOop m = _preserved_marks_of_objs.pop();
    obj->set_mark(m);
  }
  _objs_with_preserved_marks.clear(true);
  _preserved_marks_of_objs.clear(true);
}

void DefNewGeneration::preserve_mark(oop obj, markOop m) {
  assert(_promotion_failed && m->must_be_preserved_for_promotion_failure(obj),
         "Oversaving!");
  _objs_with_preserved_marks.push(obj);
  _preserved_marks_of_objs.push(m);
}

void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    preserve_mark(obj, m);
  }
}

void DefNewGeneration::handle_promotion_failure(oop old) {
  log_debug(promotion)("Promotion failure size = %d) ", old->size());

  _promotion_failed = true;
  _promotion_failed_info.register_copy_failure(old->size());
  preserve_mark_if_necessary(old, old->mark());
  // forward to self
  old->forward_to(old);

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}

oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate_aligned(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _old_gen->promote(old, s);
    if (obj == NULL) {
      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  return obj;
}

void DefNewGeneration::drain_promo_failure_scan_stack() {
  while (!_promo_failure_scan_stack.is_empty()) {
    oop obj = _promo_failure_scan_stack.pop();
    obj->oop_iterate(_promo_failure_scan_stack_closure);
  }
}

void DefNewGeneration::save_marks() {
  eden()->set_saved_mark();
  to()->set_saved_mark();
  from()->set_saved_mark();
}


void DefNewGeneration::reset_saved_marks() {
  eden()->reset_saved_mark();
  to()->reset_saved_mark();
  from()->reset_saved_mark();
}


bool DefNewGeneration::no_allocs_since_save_marks() {
  assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
  assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
  return to()->saved_mark_at_top();
}

#define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
                                                                \
void DefNewGeneration::                                         \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
  cl->set_generation(this);                                     \
  eden()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  to()->oop_since_save_marks_iterate##nv_suffix(cl);            \
  from()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  cl->reset_generation();                                       \
  save_marks();                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)

#undef DefNew_SINCE_SAVE_MARKS_DEFN

void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                          size_t max_alloc_words) {
  if (requestor == this || _promotion_failed) {
    return;
  }
  assert(GenCollectedHeap::heap()->is_old_gen(requestor), "We should not call our own generation");

  /* $$$ Assert this?  "trace" is a "MarkSweep" function so that's not appropriate.
  if (to_space->top() > to_space->bottom()) {
    trace("to_space not empty when contribute_scratch called");
  }
  */

  ContiguousSpace* to_space = to();
  assert(to_space->end() >= to_space->top(), "pointers out of order");
  size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    ScratchBlock* sb = (ScratchBlock*)to_space->top();
    sb->num_words = free_words;
    sb->next = list;
    list = sb;
  }
}

void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of
  // to_space if ZapUnusedHeapArea. This is needed because
  // top is not maintained while using to-space as scratch.
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area_complete();
  }
}

bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    log_trace(gc)(":: to is not empty ::");
    return false;
  }
  if (_old_gen == NULL) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    _old_gen = gch->old_gen();
  }
  return _old_gen->promotion_attempt_is_safe(used());
}

void DefNewGeneration::gc_epilogue(bool full) {
  DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)

  assert(!GC_locker::is_active(), "We should not be executing here");
  // Check if the heap is approaching full after a collection has
  // been done. Generally the young generation is empty at
  // a minimum at the end of a collection. If it is not, then
  // the heap is approaching full.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (full) {
    DEBUG_ONLY(seen_incremental_collection_failed = false;)
    if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
      log_trace(gc)("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
                    GCCause::to_string(gch->gc_cause()));
      gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
      set_should_allocate_from_space(); // we seem to be running out of space
    } else {
      log_trace(gc)("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
                    GCCause::to_string(gch->gc_cause()));
      gch->clear_incremental_collection_failed(); // We just did a full collection
      clear_should_allocate_from_space(); // if set
    }
  } else {
#ifdef ASSERT
    // It is possible that incremental_collection_failed() == true
    // here, because an attempted scavenge did not succeed. The policy
    // is normally expected to cause a full collection which should
    // clear that condition, so we should not be here twice in a row
    // with incremental_collection_failed() == true without having done
    // a full collection in between.
    if (!seen_incremental_collection_failed &&
        gch->incremental_collection_failed()) {
      log_trace(gc)("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
                    GCCause::to_string(gch->gc_cause()));
      seen_incremental_collection_failed = true;
    } else if (seen_incremental_collection_failed) {
      log_trace(gc)("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
                    GCCause::to_string(gch->gc_cause()));
      assert(gch->gc_cause() == GCCause::_scavenge_alot ||
             (GCCause::is_user_requested_gc(gch->gc_cause()) && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
             !gch->incremental_collection_failed(),
             "Twice in a row");
      seen_incremental_collection_failed = false;
    }
#endif // ASSERT
  }

  if (ZapUnusedHeapArea) {
    eden()->check_mangled_unused_area_complete();
    from()->check_mangled_unused_area_complete();
    to()->check_mangled_unused_area_complete();
  }

  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }

  // update the generation and space performance counters
  update_counters();
  gch->collector_policy()->counters()->update_counters();
}

void DefNewGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  eden()->set_top_for_allocations();
  to()->set_top_for_allocations();
  from()->set_top_for_allocations();
}

void DefNewGeneration::ref_processor_init() {
  Generation::ref_processor_init();
}


void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}

void DefNewGeneration::verify() {
  eden()->verify();
  from()->verify();
  to()->verify();
}

void DefNewGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print(" eden");
  eden()->print_on(st);
  st->print(" from");
  from()->print_on(st);
  st->print(" to ");
  to()->print_on(st);
}


const char* DefNewGeneration::name() const {
  return "def new generation";
}

// Moved from inline file as they are not called inline
CompactibleSpace*
DefNewGeneration::first_compaction_space() const {
  return eden();
}

HeapWord* DefNewGeneration::allocate(size_t word_size, bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden. If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result != NULL) {
    if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
      _old_gen->sample_eden_chunk();
    }
  } else {
    // If the eden is full and the last collection bailed out, we are running
    // out of heap space, and we try to allocate the from-space, too.
    // allocate_from_space can't be inlined because that would introduce a
    // circular dependency at compile time.
    result = allocate_from_space(word_size);
  }
  return result;
}

HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                         bool is_tlab) {
  HeapWord* res = eden()->par_allocate(word_size);
  if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
    _old_gen->sample_eden_chunk();
  }
  return res;
}

size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}

size_t DefNewGeneration::tlab_used() const {
  return eden()->used();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}