/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/serial/defNewGeneration.inline.hpp"
#include "gc/shared/ageTable.inline.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/cms/parOopClosures.hpp"
#endif

//
// DefNewGeneration functions.

// Methods of protected closure types.
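// Overview (summarizing the definitions below): IsAliveClosure is the
// liveness predicate handed to the reference processor; an object is
// considered live if it lies outside the young generation or has already
// been forwarded (i.e. copied) by this scavenge. The KeepAlive closures
// copy weakly reachable objects that must survive, updating the
// remembered set where needed, and the EvacuateFollowers closures
// repeatedly rescan newly copied objects until no further copies are
// made, completing the transitive closure of the scavenge.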

DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* young_gen) : _young_gen(young_gen) {
  assert(_young_gen->kind() == Generation::ParNew ||
         _young_gen->kind() == Generation::DefNew, "Expected the young generation here");
}

bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
  return (HeapWord*)p >= _young_gen->reserved().end() || p->is_forwarded();
}

DefNewGeneration::KeepAliveClosure::
KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
  _rs = GenCollectedHeap::heap()->rem_set();
}

void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }


DefNewGeneration::FastKeepAliveClosure::
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {
  _boundary = g->reserved().end();
}

void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }

DefNewGeneration::EvacuateFollowersClosure::
EvacuateFollowersClosure(GenCollectedHeap* gch,
                         ScanClosure* cur,
                         ScanClosure* older) :
  _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::EvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen, _scan_cur_or_nonheap, _scan_older);
  } while (!_gch->no_allocs_since_save_marks());
}

DefNewGeneration::FastEvacuateFollowersClosure::
FastEvacuateFollowersClosure(GenCollectedHeap* gch,
                             FastScanClosure* cur,
                             FastScanClosure* older) :
  _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
{
  assert(_gch->young_gen()->kind() == Generation::DefNew, "Generation should be DefNew");
  _young_gen = (DefNewGeneration*)_gch->young_gen();
}

void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen, _scan_cur_or_nonheap, _scan_older);
  } while (!_gch->no_allocs_since_save_marks());
  guarantee(_young_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
}

ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
    OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  _boundary = _g->reserved().end();
}

void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }

FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
    OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  _boundary = _g->reserved().end();
}

void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }

void KlassScanClosure::do_klass(Klass* klass) {
  NOT_PRODUCT(ResourceMark rm);
  log_develop_trace(gc, scavenge)("KlassScanClosure::do_klass " PTR_FORMAT ", %s, dirty: %s",
                                  p2i(klass),
                                  klass->external_name(),
                                  klass->has_modified_oops() ? "true" : "false");

  // If the klass has not been dirtied we know that there are
  // no references into the young gen and we can skip it.
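  // (The modified-oops flag acts, in effect, as a coarse remembered-set
  // entry for metadata: it is set when a reference field held by the
  // Klass is updated, and cleared below once the klass has been scanned.)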
  if (klass->has_modified_oops()) {
    if (_accumulate_modified_oops) {
      klass->accumulate_modified_oops();
    }

    // Clear this state since we're going to scavenge all the metadata.
    klass->clear_modified_oops();

    // Tell the closure which Klass is being scanned so that it can be dirtied
    // if oops are left pointing into the young gen.
    _scavenge_closure->set_scanned_klass(klass);

    klass->oops_do(_scavenge_closure);

    _scavenge_closure->set_scanned_klass(NULL);
  }
}

ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
  _g(g)
{
  _boundary = _g->reserved().end();
}

void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }

void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }

KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
                                   KlassRemSet* klass_rem_set)
    : _scavenge_closure(scavenge_closure),
      _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}


DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   const char* policy)
  : Generation(rs, initial_size),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  gch->barrier_set()->resize_covered_region(cmr);

  _eden_space = new ContiguousSpace();
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
    vm_exit_during_initialization("Could not allocate a new gen space");
  }

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
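  // For illustration (assuming the default SurvivorRatio of 8; the
  // numbers are not part of the original logic): compute_survivor_size()
  // gives each survivor space roughly 1/(SurvivorRatio + 2) of the young
  // generation, so a 10M reserved young generation splits into an 8M
  // eden and two 1M survivor spaces, with _max_eden_size below being
  // size - 2 * _max_survivor_size.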
  uintx alignment = gch->collector_policy()->space_alignment();
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, alignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters
  GenCollectorPolicy* gcp = gch->gen_policy();

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3,
      gcp->min_young_size(), gcp->max_young_size(), &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _old_gen = NULL;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;

  _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
}

void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  uintx alignment =
    GenCollectedHeap::heap()->collector_policy()->space_alignment();

  // If the spaces are being cleared (only done at heap initialization
  // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces,
  // so check that they are empty.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
         "Initialization of the survivor spaces assumes these are empty");

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, alignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // This may happen due to 64K rounding; if so, adjust eden size back up.
    minimum_eden_size = align_size_up(minimum_eden_size, alignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_size_down(maximum_survivor_size, alignment);
    survivor_size = MAX2(unaligned_survivor_size, alignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start, (HeapWord*)to_end);

  // A minimum eden size implies that there is a part of eden that
  // is being used and that affects the initialization of any
  // newly formed eden.
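  // (A non-zero minimum_eden_size is only passed in from
  // compute_new_size(), which supplies eden()->used() when resizing a
  // young generation whose eden still holds live data.)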
  bool live_in_eden = minimum_eden_size > 0;

  // If not clearing the spaces, do some checking to verify that
  // the spaces are already mangled.
  if (!clear_space) {
    // Must check mangling before the spaces are reshaped. Otherwise,
    // the bottom or end of one space may have moved into another
    // space, and a failure of the check may not correctly indicate
    // which space is not properly mangled.
    if (ZapUnusedHeapArea) {
      HeapWord* limit = (HeapWord*) _virtual_space.high();
      eden()->check_mangled_unused_area(limit);
      from()->check_mangled_unused_area(limit);
      to()->check_mangled_unused_area(limit);
    }
  }

  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
  // If clear_space and live_in_eden, we will not have cleared any
  // portion of eden above its top. This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, clear_space, mangle_space);
  to()->initialize(toMR, clear_space, mangle_space);

  // Set next compaction spaces.
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered. The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space = to();
  _to_space   = s;
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered. The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}

bool DefNewGeneration::expand(size_t bytes) {
  MutexLocker x(ExpandHeap_lock);
  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
  bool success = _virtual_space.expand_by(bytes);
  if (success && ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    HeapWord* new_high = (HeapWord*) _virtual_space.high();
    MemRegion mangle_region(prev_high, new_high);
    SpaceMangler::mangle_region(mangle_region);
  }

  // Do not attempt an expand-to-the-reserve-size. The
  // request should properly observe the maximum size of
  // the generation so an expand-to-reserve should be
  // unnecessary. Also, a second expand-to-reserve call can
  // potentially cause an undue expansion: for example, if
  // the first expand fails for unknown reasons but the
  // second succeeds and expands the heap to its maximum
  // value.
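  // When the GC locker is holding collections off, the heap may be
  // expanded in lieu of a collection; the log message below records
  // that substitution.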
  if (GCLocker::is_active()) {
    log_debug(gc)("Garbage collection disabled, expanded heap instead");
  }

  return success;
}

size_t DefNewGeneration::adjust_for_thread_increase(size_t new_size_candidate,
                                                    size_t new_size_before,
                                                    size_t alignment) const {
  size_t desired_new_size = new_size_before;

  if (NewSizeThreadIncrease > 0) {
    int threads_count;
    size_t thread_increase_size = 0;

    // 1. Check for overflow at 'threads_count * NewSizeThreadIncrease'.
    threads_count = Threads::number_of_non_daemon_threads();
    if (threads_count > 0 && NewSizeThreadIncrease <= max_uintx / threads_count) {
      thread_increase_size = threads_count * NewSizeThreadIncrease;

      // 2. Check for overflow at 'new_size_candidate + thread_increase_size'.
      if (new_size_candidate <= max_uintx - thread_increase_size) {
        new_size_candidate += thread_increase_size;

        // 3. Check for overflow at 'align_size_up'.
        size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1));
        if (new_size_candidate <= aligned_max) {
          desired_new_size = align_size_up(new_size_candidate, alignment);
        }
      }
    }
  }

  return desired_new_size;
}

void DefNewGeneration::compute_new_size() {
  // This is called after a GC that includes the old generation, so from-space
  // will normally be empty.
  // Note that we check both spaces, since if scavenge failed they revert roles.
  // If they are not empty, we bail out (otherwise we would have to relocate
  // the objects).
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  size_t old_size = gch->old_gen()->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = initial_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  int threads_count = 0;
  size_t thread_increase_size = 0;

  size_t new_size_candidate = old_size / NewRatio;
  // Compute the desired new generation size based on NewRatio and
  // NewSizeThreadIncrease, reverting to the previous value if any
  // overflow happens.
  size_t desired_new_size = adjust_for_thread_increase(new_size_candidate, new_size_before, alignment);

  // Adjust new generation size
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
      changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false. If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // Bail out of shrinking if there are objects in eden.
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    gch->barrier_set()->resize_covered_region(cmr);

    log_debug(gc, heap, ergo)(
        "New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden=" SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, _virtual_space.committed_size()/K,
        eden()->capacity()/K, from()->capacity()/K);
    log_trace(gc, heap, ergo)(
        " [allowed " SIZE_FORMAT "K extra for %d threads]",
        thread_increase_size/K, threads_count);
  }
}

void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl, uint n_threads) {
  assert(false, "NYI -- are you sure you want to call this?");
}


size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}


size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used();      // to() is only used during scavenge
}


size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}

size_t DefNewGeneration::max_capacity() const {
  const size_t alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
  const size_t reserved_bytes = reserved().byte_size();
  return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
}

size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}

size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}

size_t DefNewGeneration::contiguous_available() const {
  return eden()->free();
}


HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }

void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}


void DefNewGeneration::space_iterate(SpaceClosure* blk,
                                     bool usedOnly) {
  blk->do_space(eden());
  blk->do_space(from());
  blk->do_space(to());
}

// The last collection bailed out, we are running out of heap space,
// so we try to allocate from the from-space, too.
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
  bool should_try_alloc = should_allocate_from_space() || GCLocker::is_active_and_needs_gc();

  // If the Heap_lock is not locked by this thread, this will be called
  // again later with the Heap_lock held.
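  // From-space allocation is not lock-free, so it is only attempted
  // when this thread already holds the Heap_lock, or when it is the VM
  // thread running at a safepoint (no competing allocators in either
  // case).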
  bool do_alloc = should_try_alloc && (Heap_lock->owned_by_self() || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()));

  HeapWord* result = NULL;
  if (do_alloc) {
    result = from()->allocate(size);
  }

  log_trace(gc, alloc)("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "): will_fail: %s heap_lock: %s free: " SIZE_FORMAT "%s%s returns %s",
                       size,
                       GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
                         "true" : "false",
                       Heap_lock->is_locked() ? "locked" : "unlocked",
                       from()->free(),
                       should_try_alloc ? "" : " should_allocate_from_space: NOT",
                       do_alloc ? " Heap_lock is not owned by self" : "",
                       result == NULL ? "NULL" : "object");

  return result;
}

HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
                                                bool   is_tlab,
                                                bool   parallel) {
  // We don't attempt to expand the young generation (but perhaps we should.)
  return allocate(size, is_tlab);
}

void DefNewGeneration::adjust_desired_tenuring_threshold() {
  // Set the desired survivor size to half the real survivor space
  GCPolicyCounters* gc_counters = GenCollectedHeap::heap()->collector_policy()->counters();
  _tenuring_threshold =
    age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize, gc_counters);
}

void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start();
  DefNewTracer gc_tracer;
  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());

  _old_gen = gch->old_gen();

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    log_trace(gc)(":: Collection attempt not safe ::");
    gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  GCTraceTime(Trace, gc) tm("DefNew", NULL, gch->gc_cause());

  gch->trace_heap_before_gc(&gc_tracer);

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(),
         "save marks have not been newly set.");

  // Not very pretty.
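  // The scavenge proper proceeds in four steps (a sketch of the control
  // flow below): (1) scan the roots and the dirty parts of the old
  // generation, copying reachable young objects into to-space or the old
  // generation; (2) evacuate "followers", i.e. objects reachable from
  // the freshly copied ones; (3) process discovered references; and
  // (4) on success, swap the survivor spaces and recompute the tenuring
  // threshold, otherwise undo forwarding and report the promotion
  // failure.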
  CollectorPolicy* cp = gch->collector_policy();

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
                                      gch->rem_set()->klass_rem_set());
  CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
                                           &fsc_with_no_gc_barrier,
                                           false);

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(),
         "save marks have not been newly set.");

  {
    // DefNew needs to run with n_threads == 0, to make sure the serial
    // version of the card table scanning code is used.
    // See: CardTableModRefBSForCTRS::non_clean_card_iterate_possibly_parallel.
    StrongRootsScope srs(0);

    gch->gen_process_roots(&srs,
                           GenCollectedHeap::YoungGen,
                           true,  // Process younger gens, if any,
                                  // as strong roots.
                           GenCollectedHeap::SO_ScavengeCodeCache,
                           GenCollectedHeap::StrongAndWeakRoots,
                           &fsc_with_no_gc_barrier,
                           &fsc_with_gc_barrier,
                           &cld_scan_closure);
  }

  // "evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  const ReferenceProcessorStats& stats =
    rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                      NULL, _gc_timer);
  gc_tracer.report_gc_reference_stats(stats);
  gc_tracer.report_tenuring_threshold(tenuring_threshold());

  if (!_promotion_failed) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s). When a young collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation. Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
    size_policy->reset_gc_overhead_limit_count();
    assert(!gch->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    log_debug(gc)("Promotion failed");
    // Add to-space to the list of space to compact
    // when a promotion failure has occurred. In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_failed();

    // Inform the next generation that a promotion failure occurred.
    _old_gen->promotion_failure_occurred();
    gc_tracer.report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(gch->reset_promotion_should_fail();)
  }
  // Set new iteration safe limit for the survivor spaces.
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());

  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings; os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  gch->trace_heap_after_gc(&gc_tracer);

  _gc_timer->register_gc_end();

  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}

class RemoveForwardPointerClosure: public ObjectClosure {
public:
  void do_object(oop obj) {
    obj->init_mark();
  }
};

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  _promotion_failed_info.reset();
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::remove_forwarding_pointers() {
  RemoveForwardPointerClosure rspc;
  eden()->object_iterate(&rspc);
  from()->object_iterate(&rspc);

  // Now restore saved marks, if any.
  assert(_objs_with_preserved_marks.size() == _preserved_marks_of_objs.size(),
         "should be the same");
  while (!_objs_with_preserved_marks.is_empty()) {
    oop obj   = _objs_with_preserved_marks.pop();
    markOop m = _preserved_marks_of_objs.pop();
    obj->set_mark(m);
  }
  _objs_with_preserved_marks.clear(true);
  _preserved_marks_of_objs.clear(true);
}

void DefNewGeneration::preserve_mark(oop obj, markOop m) {
  assert(_promotion_failed && m->must_be_preserved_for_promotion_failure(obj),
         "Oversaving!");
  _objs_with_preserved_marks.push(obj);
  _preserved_marks_of_objs.push(m);
}

void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    preserve_mark(obj, m);
  }
}

void DefNewGeneration::handle_promotion_failure(oop old) {
  log_debug(gc, promotion)("Promotion failure size = %d", old->size());

  _promotion_failed = true;
  _promotion_failed_info.register_copy_failure(old->size());
  preserve_mark_if_necessary(old, old->mark());
  // Forward to self.
  old->forward_to(old);

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}

oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate_aligned(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _old_gen->promote(old, s);
    if (obj == NULL) {
      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
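    // (aligned_disjoint_words is safe here: the destination is a fresh
    // allocation in to-space or the old generation, which cannot overlap
    // the young-generation source object.)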
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  return obj;
}

void DefNewGeneration::drain_promo_failure_scan_stack() {
  while (!_promo_failure_scan_stack.is_empty()) {
    oop obj = _promo_failure_scan_stack.pop();
    obj->oop_iterate(_promo_failure_scan_stack_closure);
  }
}

void DefNewGeneration::save_marks() {
  eden()->set_saved_mark();
  to()->set_saved_mark();
  from()->set_saved_mark();
}


void DefNewGeneration::reset_saved_marks() {
  eden()->reset_saved_mark();
  to()->reset_saved_mark();
  from()->reset_saved_mark();
}


bool DefNewGeneration::no_allocs_since_save_marks() {
  assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
  assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
  return to()->saved_mark_at_top();
}

#define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
                                                                \
void DefNewGeneration::                                         \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
  cl->set_generation(this);                                     \
  eden()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  to()->oop_since_save_marks_iterate##nv_suffix(cl);            \
  from()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  cl->reset_generation();                                       \
  save_marks();                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)

#undef DefNew_SINCE_SAVE_MARKS_DEFN

void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                          size_t max_alloc_words) {
  if (requestor == this || _promotion_failed) {
    return;
  }
  assert(GenCollectedHeap::heap()->is_old_gen(requestor), "We should not call our own generation");

  /* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate.
  if (to_space->top() > to_space->bottom()) {
    trace("to_space not empty when contribute_scratch called");
  }
  */

  ContiguousSpace* to_space = to();
  assert(to_space->end() >= to_space->top(), "pointers out of order");
  size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    ScratchBlock* sb = (ScratchBlock*)to_space->top();
    sb->num_words = free_words;
    sb->next = list;
    list = sb;
  }
}

void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of
  // to_space if ZapUnusedHeapArea. This is needed because
  // top is not maintained while using to-space as scratch.
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area_complete();
  }
}

bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    log_trace(gc)(":: to is not empty ::");
    return false;
  }
  if (_old_gen == NULL) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    _old_gen = gch->old_gen();
  }
  return _old_gen->promotion_attempt_is_safe(used());
}

void DefNewGeneration::gc_epilogue(bool full) {
  DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)

  assert(!GCLocker::is_active(), "We should not be executing here");
  // Check if the heap is approaching full after a collection has
  // been done. Generally the young generation is empty at
  // a minimum at the end of a collection. If it is not, then
  // the heap is approaching full.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (full) {
    DEBUG_ONLY(seen_incremental_collection_failed = false;)
    if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
      log_trace(gc)("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
                    GCCause::to_string(gch->gc_cause()));
      gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
      set_should_allocate_from_space(); // we seem to be running out of space
    } else {
      log_trace(gc)("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
                    GCCause::to_string(gch->gc_cause()));
      gch->clear_incremental_collection_failed(); // We just did a full collection
      clear_should_allocate_from_space(); // if set
    }
  } else {
#ifdef ASSERT
    // It is possible that incremental_collection_failed() == true
    // here, because an attempted scavenge did not succeed. The policy
    // is normally expected to cause a full collection which should
    // clear that condition, so we should not be here twice in a row
    // with incremental_collection_failed() == true without having done
    // a full collection in between.
    if (!seen_incremental_collection_failed &&
        gch->incremental_collection_failed()) {
      log_trace(gc)("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
                    GCCause::to_string(gch->gc_cause()));
      seen_incremental_collection_failed = true;
    } else if (seen_incremental_collection_failed) {
      log_trace(gc)("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
                    GCCause::to_string(gch->gc_cause()));
      assert(gch->gc_cause() == GCCause::_scavenge_alot ||
             (GCCause::is_user_requested_gc(gch->gc_cause()) && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
             !gch->incremental_collection_failed(),
             "Twice in a row");
      seen_incremental_collection_failed = false;
    }
#endif // ASSERT
  }

  if (ZapUnusedHeapArea) {
    eden()->check_mangled_unused_area_complete();
    from()->check_mangled_unused_area_complete();
    to()->check_mangled_unused_area_complete();
  }

  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }

  // Update the generation and space performance counters.
  update_counters();
  gch->collector_policy()->counters()->update_counters();
}

void DefNewGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  eden()->set_top_for_allocations();
  to()->set_top_for_allocations();
  from()->set_top_for_allocations();
}

void DefNewGeneration::ref_processor_init() {
  Generation::ref_processor_init();
}


void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}

void DefNewGeneration::verify() {
  eden()->verify();
  from()->verify();
  to()->verify();
}

void DefNewGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print(" eden");
  eden()->print_on(st);
  st->print(" from");
  from()->print_on(st);
  st->print(" to  ");
  to()->print_on(st);
}


const char* DefNewGeneration::name() const {
  return "def new generation";
}

// Moved from inline file as they are not called inline.
CompactibleSpace* DefNewGeneration::first_compaction_space() const {
  return eden();
}

HeapWord* DefNewGeneration::allocate(size_t word_size, bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden. If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result != NULL) {
    if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
      _old_gen->sample_eden_chunk();
    }
  } else {
    // If the eden is full and the last collection bailed out, we are running
    // out of heap space, and we try to allocate from the from-space, too.
    // allocate_from_space can't be inlined because that would introduce a
    // circular dependency at compile time.
    result = allocate_from_space(word_size);
  }
  return result;
}

HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                         bool is_tlab) {
  HeapWord* res = eden()->par_allocate(word_size);
  if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
    _old_gen->sample_eden_chunk();
  }
  return res;
}

size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}

size_t DefNewGeneration::tlab_used() const {
  return eden()->used();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}