1 /*
   2  * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/serial/defNewGeneration.inline.hpp"
  27 #include "gc/shared/cardTableRS.hpp"
  28 #include "gc/shared/collectorCounters.hpp"
  29 #include "gc/shared/gcHeapSummary.hpp"
  30 #include "gc/shared/gcLocker.inline.hpp"
  31 #include "gc/shared/gcPolicyCounters.hpp"
  32 #include "gc/shared/gcTimer.hpp"
  33 #include "gc/shared/gcTrace.hpp"
  34 #include "gc/shared/gcTraceTime.inline.hpp"
  35 #include "gc/shared/genCollectedHeap.hpp"
  36 #include "gc/shared/genOopClosures.inline.hpp"
  37 #include "gc/shared/generationSpec.hpp"
  38 #include "gc/shared/referencePolicy.hpp"
  39 #include "gc/shared/space.inline.hpp"
  40 #include "gc/shared/spaceDecorator.hpp"
  41 #include "gc/shared/strongRootsScope.hpp"
  42 #include "logging/log.hpp"
  43 #include "memory/iterator.hpp"
  44 #include "oops/instanceRefKlass.hpp"
  45 #include "oops/oop.inline.hpp"
  46 #include "runtime/atomic.inline.hpp"
  47 #include "runtime/java.hpp"
  48 #include "runtime/prefetch.inline.hpp"
  49 #include "runtime/thread.inline.hpp"
  50 #include "utilities/copy.hpp"
  51 #include "utilities/globalDefinitions.hpp"
  52 #include "utilities/stack.inline.hpp"
  53 #if INCLUDE_ALL_GCS
  54 #include "gc/cms/parOopClosures.hpp"
  55 #endif
  56 
  57 //
  58 // DefNewGeneration functions.
  59 
  60 // Methods of protected closure types.
  61 
  62 DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* young_gen) : _young_gen(young_gen) {
  63   assert(_young_gen->kind() == Generation::ParNew ||
  64          _young_gen->kind() == Generation::DefNew, "Expected the young generation here");
  65 }
  66 
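      // During a scavenge an object in the young generation is considered
      // live iff it has already been forwarded (copied to to-space or
      // promoted); anything outside the young generation is treated as live.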
  67 bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
  68   return (HeapWord*)p >= _young_gen->reserved().end() || p->is_forwarded();
  69 }
  70 
  71 DefNewGeneration::KeepAliveClosure::
  72 KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
  73   _rs = GenCollectedHeap::heap()->rem_set();
  74 }
  75 
  76 void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
  77 void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
  78 
  79 
  80 DefNewGeneration::FastKeepAliveClosure::
  81 FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  82   DefNewGeneration::KeepAliveClosure(cl) {
  83   _boundary = g->reserved().end();
  84 }
  85 
  86 void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
  87 void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
  88 
  89 DefNewGeneration::EvacuateFollowersClosure::
  90 EvacuateFollowersClosure(GenCollectedHeap* gch,
  91                          ScanClosure* cur,
  92                          ScanClosure* older) :
  93   _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
  94 {}
  95 
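      // Iterate over the objects copied since the last save_marks() call,
      // scanning their fields, and repeat until a pass evacuates nothing new.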
  96 void DefNewGeneration::EvacuateFollowersClosure::do_void() {
  97   do {
  98     _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen, _scan_cur_or_nonheap, _scan_older);
  99   } while (!_gch->no_allocs_since_save_marks());
 100 }
 101 
 102 DefNewGeneration::FastEvacuateFollowersClosure::
 103 FastEvacuateFollowersClosure(GenCollectedHeap* gch,
 104                              FastScanClosure* cur,
 105                              FastScanClosure* older) :
 106   _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
 107 {
 108   assert(_gch->young_gen()->kind() == Generation::DefNew, "Generation should be DefNew");
 109   _young_gen = (DefNewGeneration*)_gch->young_gen();
 110 }
 111 
 112 void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
 113   do {
 114     _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen, _scan_cur_or_nonheap, _scan_older);
 115   } while (!_gch->no_allocs_since_save_marks());
 116   guarantee(_young_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
 117 }
 118 
 119 ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
 120     OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
 121 {
 122   _boundary = _g->reserved().end();
 123 }
 124 
 125 void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
 126 void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }
 127 
 128 FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
 129     OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
 130 {
 131   _boundary = _g->reserved().end();
 132 }
 133 
 134 void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
 135 void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }
 136 
 137 void KlassScanClosure::do_klass(Klass* klass) {
 138   NOT_PRODUCT(ResourceMark rm);
 139   log_develop_trace(gc, scavenge)("KlassScanClosure::do_klass " PTR_FORMAT ", %s, dirty: %s",
 140                                   p2i(klass),
 141                                   klass->external_name(),
 142                                   klass->has_modified_oops() ? "true" : "false");
 143 
  144   // If the klass has not been dirtied we know that there are
  145   // no references into the young gen and we can skip it.
 146   if (klass->has_modified_oops()) {
 147     if (_accumulate_modified_oops) {
 148       klass->accumulate_modified_oops();
 149     }
 150 
 151     // Clear this state since we're going to scavenge all the metadata.
 152     klass->clear_modified_oops();
 153 
 154     // Tell the closure which Klass is being scanned so that it can be dirtied
 155     // if oops are left pointing into the young gen.
 156     _scavenge_closure->set_scanned_klass(klass);
 157 
 158     klass->oops_do(_scavenge_closure);
 159 
 160     _scavenge_closure->set_scanned_klass(NULL);
 161   }
 162 }
 163 
 164 ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
 165   _g(g)
 166 {
 167   _boundary = _g->reserved().end();
 168 }
 169 
 170 void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
 171 void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }
 172 
 173 void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
 174 void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }
 175 
 176 KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
 177                                    KlassRemSet* klass_rem_set)
 178     : _scavenge_closure(scavenge_closure),
 179       _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}
 180 
 181 
 182 DefNewGeneration::DefNewGeneration(ReservedSpace rs,
 183                                    size_t initial_size,
 184                                    const char* policy)
 185   : Generation(rs, initial_size),
 186     _promo_failure_drain_in_progress(false),
 187     _should_allocate_from_space(false)
 188 {
 189   MemRegion cmr((HeapWord*)_virtual_space.low(),
 190                 (HeapWord*)_virtual_space.high());
 191   GenCollectedHeap* gch = GenCollectedHeap::heap();
 192 
 193   gch->barrier_set()->resize_covered_region(cmr);
 194 
 195   _eden_space = new ContiguousSpace();
 196   _from_space = new ContiguousSpace();
 197   _to_space   = new ContiguousSpace();
 198 
 199   if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
 200     vm_exit_during_initialization("Could not allocate a new gen space");
 201   }
 202 
 203   // Compute the maximum eden and survivor space sizes. These sizes
 204   // are computed assuming the entire reserved space is committed.
 205   // These values are exported as performance counters.
 206   uintx alignment = gch->collector_policy()->space_alignment();
 207   uintx size = _virtual_space.reserved_size();
 208   _max_survivor_size = compute_survivor_size(size, alignment);
 209   _max_eden_size = size - (2*_max_survivor_size);
 210 
 211   // allocate the performance counters
 212   GenCollectorPolicy* gcp = gch->gen_policy();
 213 
 214   // Generation counters -- generation 0, 3 subspaces
 215   _gen_counters = new GenerationCounters("new", 0, 3,
 216       gcp->min_young_size(), gcp->max_young_size(), &_virtual_space);
 217   _gc_counters = new CollectorCounters(policy, 0);
 218 
 219   _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
 220                                       _gen_counters);
 221   _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
 222                                       _gen_counters);
 223   _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
 224                                     _gen_counters);
 225 
 226   compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
 227   update_counters();
 228   _old_gen = NULL;
 229   _tenuring_threshold = MaxTenuringThreshold;
 230   _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
 231 
 232   _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
 233 }
 234 
 235 void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
 236                                                 bool clear_space,
 237                                                 bool mangle_space) {
 238   uintx alignment =
 239     GenCollectedHeap::heap()->collector_policy()->space_alignment();
 240 
 241   // If the spaces are being cleared (only done at heap initialization
 242   // currently), the survivor spaces need not be empty.
  243   // Otherwise, no care is taken for used areas in the survivor spaces,
  244   // so check that they are empty.
 245   assert(clear_space || (to()->is_empty() && from()->is_empty()),
 246     "Initialization of the survivor spaces assumes these are empty");
 247 
 248   // Compute sizes
 249   uintx size = _virtual_space.committed_size();
 250   uintx survivor_size = compute_survivor_size(size, alignment);
 251   uintx eden_size = size - (2*survivor_size);
 252   assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
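        // For example, assuming compute_survivor_size() divides the given
        // size by (SurvivorRatio + 2): a 30M committed young generation with
        // the default SurvivorRatio of 8 yields roughly 3M per survivor
        // space and ~24M for eden (illustrative figures, before alignment).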
 253 
 254   if (eden_size < minimum_eden_size) {
  255     // May happen due to 64Kb rounding; if so, adjust eden size back up
 256     minimum_eden_size = align_size_up(minimum_eden_size, alignment);
 257     uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
 258     uintx unaligned_survivor_size =
 259       align_size_down(maximum_survivor_size, alignment);
 260     survivor_size = MAX2(unaligned_survivor_size, alignment);
 261     eden_size = size - (2*survivor_size);
 262     assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
 263     assert(eden_size >= minimum_eden_size, "just checking");
 264   }
 265 
 266   char *eden_start = _virtual_space.low();
 267   char *from_start = eden_start + eden_size;
 268   char *to_start   = from_start + survivor_size;
 269   char *to_end     = to_start   + survivor_size;
 270 
 271   assert(to_end == _virtual_space.high(), "just checking");
 272   assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
 273   assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
 274   assert(Space::is_aligned((HeapWord*)to_start),   "checking alignment");
 275 
 276   MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
 277   MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
 278   MemRegion toMR  ((HeapWord*)to_start, (HeapWord*)to_end);
 279 
 280   // A minimum eden size implies that there is a part of eden that
 281   // is being used and that affects the initialization of any
 282   // newly formed eden.
 283   bool live_in_eden = minimum_eden_size > 0;
 284 
 285   // If not clearing the spaces, do some checking to verify that
 286   // the space are already mangled.
 287   if (!clear_space) {
  288     // Must check mangling before the spaces are reshaped.  Otherwise,
  289     // the bottom or end of one space may have moved into another space
  290     // and a failure of the check may not correctly indicate which space
  291     // is not properly mangled.
 292     if (ZapUnusedHeapArea) {
 293       HeapWord* limit = (HeapWord*) _virtual_space.high();
 294       eden()->check_mangled_unused_area(limit);
 295       from()->check_mangled_unused_area(limit);
  296       to()->check_mangled_unused_area(limit);
 297     }
 298   }
 299 
 300   // Reset the spaces for their new regions.
 301   eden()->initialize(edenMR,
 302                      clear_space && !live_in_eden,
 303                      SpaceDecorator::Mangle);
 304   // If clear_space and live_in_eden, we will not have cleared any
 305   // portion of eden above its top. This can cause newly
 306   // expanded space not to be mangled if using ZapUnusedHeapArea.
 307   // We explicitly do such mangling here.
 308   if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
 309     eden()->mangle_unused_area();
 310   }
 311   from()->initialize(fromMR, clear_space, mangle_space);
 312   to()->initialize(toMR, clear_space, mangle_space);
 313 
 314   // Set next compaction spaces.
 315   eden()->set_next_compaction_space(from());
 316   // The to-space is normally empty before a compaction so need
 317   // not be considered.  The exception is during promotion
 318   // failure handling when to-space can contain live objects.
 319   from()->set_next_compaction_space(NULL);
 320 }
 321 
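      // Exchange the roles of from-space and to-space (and, with UsePerfData,
      // their space counters).  After a successful scavenge this leaves the
      // survivors in the space named "from" and an empty "to" space.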
 322 void DefNewGeneration::swap_spaces() {
 323   ContiguousSpace* s = from();
 324   _from_space        = to();
 325   _to_space          = s;
 326   eden()->set_next_compaction_space(from());
 327   // The to-space is normally empty before a compaction so need
 328   // not be considered.  The exception is during promotion
 329   // failure handling when to-space can contain live objects.
 330   from()->set_next_compaction_space(NULL);
 331 
 332   if (UsePerfData) {
 333     CSpaceCounters* c = _from_counters;
 334     _from_counters = _to_counters;
 335     _to_counters = c;
 336   }
 337 }
 338 
 339 bool DefNewGeneration::expand(size_t bytes) {
 340   MutexLocker x(ExpandHeap_lock);
 341   HeapWord* prev_high = (HeapWord*) _virtual_space.high();
 342   bool success = _virtual_space.expand_by(bytes);
 343   if (success && ZapUnusedHeapArea) {
 344     // Mangle newly committed space immediately because it
  345     // can be done here more simply than after the new
 346     // spaces have been computed.
 347     HeapWord* new_high = (HeapWord*) _virtual_space.high();
 348     MemRegion mangle_region(prev_high, new_high);
 349     SpaceMangler::mangle_region(mangle_region);
 350   }
 351 
  352   // Do not attempt an expand to the reserve size.  The
  353   // request should properly observe the maximum size of
  354   // the generation, so an expand to the reserve size should be
  355   // unnecessary.  Also, a second call to expand to the reserve
  356   // size can potentially cause an undue expansion.
  357   // For example, the first expand may fail for unknown reasons
  358   // but the second may succeed and expand the heap to its
  359   // maximum value.
 360   if (GC_locker::is_active()) {
 361     log_debug(gc)("Garbage collection disabled, expanded heap instead");
 362   }
 363 
 364   return success;
 365 }
 366 
 367 void DefNewGeneration::compute_new_size() {
 368   // This is called after a GC that includes the old generation, so from-space
 369   // will normally be empty.
 370   // Note that we check both spaces, since if scavenge failed they revert roles.
 371   // If not we bail out (otherwise we would have to relocate the objects).
 372   if (!from()->is_empty() || !to()->is_empty()) {
 373     return;
 374   }
 375 
 376   GenCollectedHeap* gch = GenCollectedHeap::heap();
 377 
 378   size_t old_size = gch->old_gen()->capacity();
 379   size_t new_size_before = _virtual_space.committed_size();
 380   size_t min_new_size = initial_size();
 381   size_t max_new_size = reserved().byte_size();
 382   assert(min_new_size <= new_size_before &&
 383          new_size_before <= max_new_size,
 384          "just checking");
 385   // All space sizes must be multiples of Generation::GenGrain.
 386   size_t alignment = Generation::GenGrain;
 387 
 388   // Compute desired new generation size based on NewRatio and
 389   // NewSizeThreadIncrease
 390   size_t desired_new_size = old_size/NewRatio;
 391   int threads_count = Threads::number_of_non_daemon_threads();
 392   size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
 393   desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);
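        // For example (illustrative figures only): a 512M old generation with
        // NewRatio=2 and 10 non-daemon threads at a NewSizeThreadIncrease of
        // 16K gives 256M + 160K, rounded up to the generation alignment.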
 394 
 395   // Adjust new generation size
 396   desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
 397   assert(desired_new_size <= max_new_size, "just checking");
 398 
 399   bool changed = false;
 400   if (desired_new_size > new_size_before) {
 401     size_t change = desired_new_size - new_size_before;
 402     assert(change % alignment == 0, "just checking");
 403     if (expand(change)) {
 404        changed = true;
 405     }
 406     // If the heap failed to expand to the desired size,
 407     // "changed" will be false.  If the expansion failed
 408     // (and at this point it was expected to succeed),
 409     // ignore the failure (leaving "changed" as false).
 410   }
 411   if (desired_new_size < new_size_before && eden()->is_empty()) {
 412     // bail out of shrinking if objects in eden
 413     size_t change = new_size_before - desired_new_size;
 414     assert(change % alignment == 0, "just checking");
 415     _virtual_space.shrink_by(change);
 416     changed = true;
 417   }
 418   if (changed) {
 419     // The spaces have already been mangled at this point but
 420     // may not have been cleared (set top = bottom) and should be.
 421     // Mangling was done when the heap was being expanded.
 422     compute_space_boundaries(eden()->used(),
 423                              SpaceDecorator::Clear,
 424                              SpaceDecorator::DontMangle);
 425     MemRegion cmr((HeapWord*)_virtual_space.low(),
 426                   (HeapWord*)_virtual_space.high());
 427     gch->barrier_set()->resize_covered_region(cmr);
 428 
 429     log_debug(gc, heap, ergo)(
 430         "New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden=" SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
 431         new_size_before/K, _virtual_space.committed_size()/K,
 432         eden()->capacity()/K, from()->capacity()/K);
 433     log_trace(gc, heap, ergo)(
 434         "  [allowed " SIZE_FORMAT "K extra for %d threads]",
 435           thread_increase_size/K, threads_count);
  436   }
 437 }
 438 
 439 void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl, uint n_threads) {
 440   assert(false, "NYI -- are you sure you want to call this?");
 441 }
 442 
 443 
 444 size_t DefNewGeneration::capacity() const {
 445   return eden()->capacity()
 446        + from()->capacity();  // to() is only used during scavenge
 447 }
 448 
 449 
 450 size_t DefNewGeneration::used() const {
 451   return eden()->used()
 452        + from()->used();      // to() is only used during scavenge
 453 }
 454 
 455 
 456 size_t DefNewGeneration::free() const {
 457   return eden()->free()
 458        + from()->free();      // to() is only used during scavenge
 459 }
 460 
 461 size_t DefNewGeneration::max_capacity() const {
 462   const size_t alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
 463   const size_t reserved_bytes = reserved().byte_size();
 464   return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
 465 }
 466 
 467 size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
 468   return eden()->free();
 469 }
 470 
 471 size_t DefNewGeneration::capacity_before_gc() const {
 472   return eden()->capacity();
 473 }
 474 
 475 size_t DefNewGeneration::contiguous_available() const {
 476   return eden()->free();
 477 }
 478 
 479 
 480 HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
 481 HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }
 482 
 483 void DefNewGeneration::object_iterate(ObjectClosure* blk) {
 484   eden()->object_iterate(blk);
 485   from()->object_iterate(blk);
 486 }
 487 
 488 
 489 void DefNewGeneration::space_iterate(SpaceClosure* blk,
 490                                      bool usedOnly) {
 491   blk->do_space(eden());
 492   blk->do_space(from());
 493   blk->do_space(to());
 494 }
 495 
  496 // The last collection bailed out; we are running out of heap space,
  497 // so we try to allocate from the from-space, too.
 498 HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
 499   bool should_try_alloc = should_allocate_from_space() || GC_locker::is_active_and_needs_gc();
 500 
 501   // If the Heap_lock is not locked by this thread, this will be called
 502   // again later with the Heap_lock held.
 503   bool do_alloc = should_try_alloc && (Heap_lock->owned_by_self() || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()));
 504 
 505   HeapWord* result = NULL;
 506   if (do_alloc) {
 507     result = from()->allocate(size);
 508   }
 509 
 510   log_trace(gc, alloc)("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "):  will_fail: %s  heap_lock: %s  free: " SIZE_FORMAT "%s%s returns %s",
 511                         size,
 512                         GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
 513                           "true" : "false",
 514                         Heap_lock->is_locked() ? "locked" : "unlocked",
 515                         from()->free(),
 516                         should_try_alloc ? "" : "  should_allocate_from_space: NOT",
  517                         do_alloc ? "" : "  Heap_lock is not owned by self",
 518                         result == NULL ? "NULL" : "object");
 519 
 520   return result;
 521 }
 522 
 523 HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
 524                                                 bool   is_tlab,
 525                                                 bool   parallel) {
 526   // We don't attempt to expand the young generation (but perhaps we should.)
 527   return allocate(size, is_tlab);
 528 }
 529 
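      // Recompute the tenuring threshold from the per-age survivor sizes
      // gathered in the age table, so that the objects kept in the survivor
      // space stay within the desired fraction of to-space capacity.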
 530 void DefNewGeneration::adjust_desired_tenuring_threshold() {
 531   // Set the desired survivor size to half the real survivor space
 532   GCPolicyCounters* gc_counters = GenCollectedHeap::heap()->collector_policy()->counters();
 533   _tenuring_threshold =
 534     age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize, gc_counters);
 535 }
 536 
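      // Do a copying collection of the young generation: scan the strong roots
      // and the dirty-card-covered parts of the old generation, evacuate live
      // young objects into to-space or promote them into the old generation,
      // process discovered references, and finally swap the survivor spaces.
      // If promotion fails, the partially evacuated spaces are left for a
      // subsequent full collection to clean up.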
 537 void DefNewGeneration::collect(bool   full,
 538                                bool   clear_all_soft_refs,
 539                                size_t size,
 540                                bool   is_tlab) {
 541   assert(full || size > 0, "otherwise we don't want to collect");
 542 
 543   GenCollectedHeap* gch = GenCollectedHeap::heap();
 544 
 545   _gc_timer->register_gc_start();
 546   DefNewTracer gc_tracer;
 547   gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
 548 
 549   _old_gen = gch->old_gen();
 550 
 551   // If the next generation is too full to accommodate promotion
 552   // from this generation, pass on collection; let the next generation
 553   // do it.
 554   if (!collection_attempt_is_safe()) {
 555     log_trace(gc)(":: Collection attempt not safe ::");
 556     gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
 557     return;
 558   }
 559   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
 560 
 561   init_assuming_no_promotion_failure();
 562 
 563   GCTraceTime(Trace, gc) tm("DefNew", NULL, gch->gc_cause());
 564 
 565   gch->trace_heap_before_gc(&gc_tracer);
 566 
 567   // These can be shared for all code paths
 568   IsAliveClosure is_alive(this);
 569   ScanWeakRefClosure scan_weak_ref(this);
 570 
 571   age_table()->clear();
 572   to()->clear(SpaceDecorator::Mangle);
 573 
 574   gch->rem_set()->prepare_for_younger_refs_iterate(false);
 575 
 576   assert(gch->no_allocs_since_save_marks(),
 577          "save marks have not been newly set.");
 578 
 579   // Not very pretty.
 580   CollectorPolicy* cp = gch->collector_policy();
 581 
 582   FastScanClosure fsc_with_no_gc_barrier(this, false);
 583   FastScanClosure fsc_with_gc_barrier(this, true);
 584 
 585   KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
 586                                       gch->rem_set()->klass_rem_set());
 587   CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
 588                                            &fsc_with_no_gc_barrier,
 589                                            false);
 590 
 591   set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
 592   FastEvacuateFollowersClosure evacuate_followers(gch,
 593                                                   &fsc_with_no_gc_barrier,
 594                                                   &fsc_with_gc_barrier);
 595 
 596   assert(gch->no_allocs_since_save_marks(),
 597          "save marks have not been newly set.");
 598 
 599   {
 600     // DefNew needs to run with n_threads == 0, to make sure the serial
 601     // version of the card table scanning code is used.
 602     // See: CardTableModRefBSForCTRS::non_clean_card_iterate_possibly_parallel.
 603     StrongRootsScope srs(0);
 604 
 605     gch->gen_process_roots(&srs,
 606                            GenCollectedHeap::YoungGen,
 607                            true,  // Process younger gens, if any,
 608                                   // as strong roots.
 609                            GenCollectedHeap::SO_ScavengeCodeCache,
 610                            GenCollectedHeap::StrongAndWeakRoots,
 611                            &fsc_with_no_gc_barrier,
 612                            &fsc_with_gc_barrier,
 613                            &cld_scan_closure);
 614   }
 615 
 616   // "evacuate followers".
 617   evacuate_followers.do_void();
 618 
 619   FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
 620   ReferenceProcessor* rp = ref_processor();
 621   rp->setup_policy(clear_all_soft_refs);
 622   const ReferenceProcessorStats& stats =
 623   rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
 624                                     NULL, _gc_timer);
 625   gc_tracer.report_gc_reference_stats(stats);
 626   gc_tracer.report_tenuring_threshold(tenuring_threshold());
 627 
 628   if (!_promotion_failed) {
 629     // Swap the survivor spaces.
 630     eden()->clear(SpaceDecorator::Mangle);
 631     from()->clear(SpaceDecorator::Mangle);
 632     if (ZapUnusedHeapArea) {
 633       // This is now done here because of the piece-meal mangling which
 634       // can check for valid mangling at intermediate points in the
 635       // collection(s).  When a young collection fails to collect
  636       // sufficient space, resizing of the young generation can occur
  637       // and redistribute the spaces in the young generation.  Mangle
 638       // here so that unzapped regions don't get distributed to
 639       // other spaces.
 640       to()->mangle_unused_area();
 641     }
 642     swap_spaces();
 643 
 644     assert(to()->is_empty(), "to space should be empty now");
 645 
 646     adjust_desired_tenuring_threshold();
 647 
  648     // A successful scavenge should restart the GC time limit count, which is
  649     // used for full GCs.
 650     AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
 651     size_policy->reset_gc_overhead_limit_count();
 652     assert(!gch->incremental_collection_failed(), "Should be clear");
 653   } else {
 654     assert(_promo_failure_scan_stack.is_empty(), "post condition");
 655     _promo_failure_scan_stack.clear(true); // Clear cached segments.
 656 
 657     remove_forwarding_pointers();
 658     log_debug(gc)("Promotion failed");
  659     // Add to-space to the list of spaces to compact
 660     // when a promotion failure has occurred.  In that
 661     // case there can be live objects in to-space
 662     // as a result of a partial evacuation of eden
 663     // and from-space.
 664     swap_spaces();   // For uniformity wrt ParNewGeneration.
 665     from()->set_next_compaction_space(to());
 666     gch->set_incremental_collection_failed();
 667 
 668     // Inform the next generation that a promotion failure occurred.
 669     _old_gen->promotion_failure_occurred();
 670     gc_tracer.report_promotion_failed(_promotion_failed_info);
 671 
 672     // Reset the PromotionFailureALot counters.
 673     NOT_PRODUCT(gch->reset_promotion_should_fail();)
 674   }
 675   // set new iteration safe limit for the survivor spaces
 676   from()->set_concurrent_iteration_safe_limit(from()->top());
 677   to()->set_concurrent_iteration_safe_limit(to()->top());
 678 
 679   // We need to use a monotonically non-decreasing time in ms
  680   // or we will see time-warp warnings; os::javaTimeMillis()
 681   // does not guarantee monotonicity.
 682   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
 683   update_time_of_last_gc(now);
 684 
 685   gch->trace_heap_after_gc(&gc_tracer);
 686 
 687   _gc_timer->register_gc_end();
 688 
 689   gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
 690 }
 691 
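      // After a promotion failure, objects in eden and from-space may still
      // carry forwarding pointers in their headers.  Reset every mark word to
      // its default value; marks that could not be reconstructed that way were
      // preserved during the scavenge and are restored below.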
 692 class RemoveForwardPointerClosure: public ObjectClosure {
 693 public:
 694   void do_object(oop obj) {
 695     obj->init_mark();
 696   }
 697 };
 698 
 699 void DefNewGeneration::init_assuming_no_promotion_failure() {
 700   _promotion_failed = false;
 701   _promotion_failed_info.reset();
 702   from()->set_next_compaction_space(NULL);
 703 }
 704 
 705 void DefNewGeneration::remove_forwarding_pointers() {
 706   RemoveForwardPointerClosure rspc;
 707   eden()->object_iterate(&rspc);
 708   from()->object_iterate(&rspc);
 709 
 710   // Now restore saved marks, if any.
 711   assert(_objs_with_preserved_marks.size() == _preserved_marks_of_objs.size(),
 712          "should be the same");
 713   while (!_objs_with_preserved_marks.is_empty()) {
 714     oop obj   = _objs_with_preserved_marks.pop();
 715     markOop m = _preserved_marks_of_objs.pop();
 716     obj->set_mark(m);
 717   }
 718   _objs_with_preserved_marks.clear(true);
 719   _preserved_marks_of_objs.clear(true);
 720 }
 721 
 722 void DefNewGeneration::preserve_mark(oop obj, markOop m) {
 723   assert(_promotion_failed && m->must_be_preserved_for_promotion_failure(obj),
 724          "Oversaving!");
 725   _objs_with_preserved_marks.push(obj);
 726   _preserved_marks_of_objs.push(m);
 727 }
 728 
 729 void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
 730   if (m->must_be_preserved_for_promotion_failure(obj)) {
 731     preserve_mark(obj, m);
 732   }
 733 }
 734 
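      // Called when an object could be copied neither into to-space nor into
      // the old generation: the object keeps its original location, is
      // forwarded to itself, and is pushed onto a scan stack so that its
      // references are still processed.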
 735 void DefNewGeneration::handle_promotion_failure(oop old) {
  736   log_debug(gc, promotion)("Promotion failure size = %d", old->size());
 737 
 738   _promotion_failed = true;
 739   _promotion_failed_info.register_copy_failure(old->size());
 740   preserve_mark_if_necessary(old, old->mark());
 741   // forward to self
 742   old->forward_to(old);
 743 
 744   _promo_failure_scan_stack.push(old);
 745 
 746   if (!_promo_failure_drain_in_progress) {
 747     // prevent recursion in copy_to_survivor_space()
 748     _promo_failure_drain_in_progress = true;
 749     drain_promo_failure_scan_stack();
 750     _promo_failure_drain_in_progress = false;
 751   }
 752 }
 753 
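      // Evacuate "old": copy it into to-space if it is still below the
      // tenuring threshold and space permits, otherwise promote it into the
      // old generation.  Returns the new copy, or "old" itself (forwarded to
      // itself) if promotion failed.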
 754 oop DefNewGeneration::copy_to_survivor_space(oop old) {
 755   assert(is_in_reserved(old) && !old->is_forwarded(),
 756          "shouldn't be scavenging this oop");
 757   size_t s = old->size();
 758   oop obj = NULL;
 759 
 760   // Try allocating obj in to-space (unless too old)
 761   if (old->age() < tenuring_threshold()) {
 762     obj = (oop) to()->allocate_aligned(s);
 763   }
 764 
 765   // Otherwise try allocating obj tenured
 766   if (obj == NULL) {
 767     obj = _old_gen->promote(old, s);
 768     if (obj == NULL) {
 769       handle_promotion_failure(old);
 770       return old;
 771     }
 772   } else {
 773     // Prefetch beyond obj
 774     const intx interval = PrefetchCopyIntervalInBytes;
 775     Prefetch::write(obj, interval);
 776 
 777     // Copy obj
 778     Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);
 779 
 780     // Increment age if obj still in new generation
 781     obj->incr_age();
 782     age_table()->add(obj, s);
 783   }
 784 
 785   // Done, insert forward pointer to obj in this header
 786   old->forward_to(obj);
 787 
 788   return obj;
 789 }
 790 
 791 void DefNewGeneration::drain_promo_failure_scan_stack() {
 792   while (!_promo_failure_scan_stack.is_empty()) {
 793      oop obj = _promo_failure_scan_stack.pop();
 794      obj->oop_iterate(_promo_failure_scan_stack_closure);
 795   }
 796 }
 797 
 798 void DefNewGeneration::save_marks() {
 799   eden()->set_saved_mark();
 800   to()->set_saved_mark();
 801   from()->set_saved_mark();
 802 }
 803 
 804 
 805 void DefNewGeneration::reset_saved_marks() {
 806   eden()->reset_saved_mark();
 807   to()->reset_saved_mark();
 808   from()->reset_saved_mark();
 809 }
 810 
 811 
 812 bool DefNewGeneration::no_allocs_since_save_marks() {
 813   assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
 814   assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
 815   return to()->saved_mark_at_top();
 816 }
 817 
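      // For each (possibly specialized) oop closure type, generate an
      // oop_since_save_marks_iterate method that applies the closure to every
      // object allocated in eden, to-space and from-space since the last
      // save_marks() call, and then saves the marks again.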
 818 #define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
 819                                                                 \
 820 void DefNewGeneration::                                         \
 821 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
 822   cl->set_generation(this);                                     \
 823   eden()->oop_since_save_marks_iterate##nv_suffix(cl);          \
 824   to()->oop_since_save_marks_iterate##nv_suffix(cl);            \
 825   from()->oop_since_save_marks_iterate##nv_suffix(cl);          \
 826   cl->reset_generation();                                       \
 827   save_marks();                                                 \
 828 }
 829 
 830 ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)
 831 
 832 #undef DefNew_SINCE_SAVE_MARKS_DEFN
 833 
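      // Offer the unused tail of to-space to an older generation as scratch
      // space for a full collection, but only if at least MinFreeScratchWords
      // words are free and this generation has not itself failed a promotion.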
 834 void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
 835                                          size_t max_alloc_words) {
 836   if (requestor == this || _promotion_failed) {
 837     return;
 838   }
 839   assert(GenCollectedHeap::heap()->is_old_gen(requestor), "We should not call our own generation");
 840 
 841   /* $$$ Assert this?  "trace" is a "MarkSweep" function so that's not appropriate.
 842   if (to_space->top() > to_space->bottom()) {
 843     trace("to_space not empty when contribute_scratch called");
 844   }
 845   */
 846 
 847   ContiguousSpace* to_space = to();
 848   assert(to_space->end() >= to_space->top(), "pointers out of order");
 849   size_t free_words = pointer_delta(to_space->end(), to_space->top());
 850   if (free_words >= MinFreeScratchWords) {
 851     ScratchBlock* sb = (ScratchBlock*)to_space->top();
 852     sb->num_words = free_words;
 853     sb->next = list;
 854     list = sb;
 855   }
 856 }
 857 
 858 void DefNewGeneration::reset_scratch() {
 859   // If contributing scratch in to_space, mangle all of
 860   // to_space if ZapUnusedHeapArea.  This is needed because
 861   // top is not maintained while using to-space as scratch.
 862   if (ZapUnusedHeapArea) {
 863     to()->mangle_unused_area_complete();
 864   }
 865 }
 866 
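      // A scavenge is attempted only if to-space is empty and the old
      // generation reports that it can absorb a promotion of everything
      // currently in use in this generation.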
 867 bool DefNewGeneration::collection_attempt_is_safe() {
 868   if (!to()->is_empty()) {
 869     log_trace(gc)(":: to is not empty ::");
 870     return false;
 871   }
 872   if (_old_gen == NULL) {
 873     GenCollectedHeap* gch = GenCollectedHeap::heap();
 874     _old_gen = gch->old_gen();
 875   }
 876   return _old_gen->promotion_attempt_is_safe(used());
 877 }
 878 
 879 void DefNewGeneration::gc_epilogue(bool full) {
 880   DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)
 881 
 882   assert(!GC_locker::is_active(), "We should not be executing here");
 883   // Check if the heap is approaching full after a collection has
  884   // been done.  Generally the young generation is, at a minimum,
  885   // empty at the end of a collection.  If it is not, then
 886   // the heap is approaching full.
 887   GenCollectedHeap* gch = GenCollectedHeap::heap();
 888   if (full) {
 889     DEBUG_ONLY(seen_incremental_collection_failed = false;)
 890     if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
 891       log_trace(gc)("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
 892                             GCCause::to_string(gch->gc_cause()));
 893       gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
 894       set_should_allocate_from_space(); // we seem to be running out of space
 895     } else {
 896       log_trace(gc)("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
 897                             GCCause::to_string(gch->gc_cause()));
 898       gch->clear_incremental_collection_failed(); // We just did a full collection
 899       clear_should_allocate_from_space(); // if set
 900     }
 901   } else {
 902 #ifdef ASSERT
 903     // It is possible that incremental_collection_failed() == true
 904     // here, because an attempted scavenge did not succeed. The policy
 905     // is normally expected to cause a full collection which should
 906     // clear that condition, so we should not be here twice in a row
 907     // with incremental_collection_failed() == true without having done
 908     // a full collection in between.
 909     if (!seen_incremental_collection_failed &&
 910         gch->incremental_collection_failed()) {
 911       log_trace(gc)("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
 912                             GCCause::to_string(gch->gc_cause()));
 913       seen_incremental_collection_failed = true;
 914     } else if (seen_incremental_collection_failed) {
 915       log_trace(gc)("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
 916                             GCCause::to_string(gch->gc_cause()));
 917       assert(gch->gc_cause() == GCCause::_scavenge_alot ||
 918              (GCCause::is_user_requested_gc(gch->gc_cause()) && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
 919              !gch->incremental_collection_failed(),
 920              "Twice in a row");
 921       seen_incremental_collection_failed = false;
 922     }
 923 #endif // ASSERT
 924   }
 925 
 926   if (ZapUnusedHeapArea) {
 927     eden()->check_mangled_unused_area_complete();
 928     from()->check_mangled_unused_area_complete();
 929     to()->check_mangled_unused_area_complete();
 930   }
 931 
 932   if (!CleanChunkPoolAsync) {
 933     Chunk::clean_chunk_pool();
 934   }
 935 
 936   // update the generation and space performance counters
 937   update_counters();
 938   gch->collector_policy()->counters()->update_counters();
 939 }
 940 
 941 void DefNewGeneration::record_spaces_top() {
 942   assert(ZapUnusedHeapArea, "Not mangling unused space");
 943   eden()->set_top_for_allocations();
 944   to()->set_top_for_allocations();
 945   from()->set_top_for_allocations();
 946 }
 947 
 948 void DefNewGeneration::ref_processor_init() {
 949   Generation::ref_processor_init();
 950 }
 951 
 952 
 953 void DefNewGeneration::update_counters() {
 954   if (UsePerfData) {
 955     _eden_counters->update_all();
 956     _from_counters->update_all();
 957     _to_counters->update_all();
 958     _gen_counters->update_all();
 959   }
 960 }
 961 
 962 void DefNewGeneration::verify() {
 963   eden()->verify();
 964   from()->verify();
  965   to()->verify();
 966 }
 967 
 968 void DefNewGeneration::print_on(outputStream* st) const {
 969   Generation::print_on(st);
 970   st->print("  eden");
 971   eden()->print_on(st);
 972   st->print("  from");
 973   from()->print_on(st);
 974   st->print("  to  ");
 975   to()->print_on(st);
 976 }
 977 
 978 
 979 const char* DefNewGeneration::name() const {
 980   return "def new generation";
 981 }
 982 
 983 // Moved from inline file as they are not called inline
 984 CompactibleSpace* DefNewGeneration::first_compaction_space() const {
 985   return eden();
 986 }
 987 
 988 HeapWord* DefNewGeneration::allocate(size_t word_size, bool is_tlab) {
 989   // This is the slow-path allocation for the DefNewGeneration.
 990   // Most allocations are fast-path in compiled code.
 991   // We try to allocate from the eden.  If that works, we are happy.
 992   // Note that since DefNewGeneration supports lock-free allocation, we
 993   // have to use it here, as well.
 994   HeapWord* result = eden()->par_allocate(word_size);
 995   if (result != NULL) {
 996     if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
 997       _old_gen->sample_eden_chunk();
 998     }
 999   } else {
1000     // If the eden is full and the last collection bailed out, we are running
 1001     // out of heap space, and we try to allocate from the from-space, too.
1002     // allocate_from_space can't be inlined because that would introduce a
1003     // circular dependency at compile time.
1004     result = allocate_from_space(word_size);
1005   }
1006   return result;
1007 }
1008 
1009 HeapWord* DefNewGeneration::par_allocate(size_t word_size,
1010                                          bool is_tlab) {
1011   HeapWord* res = eden()->par_allocate(word_size);
1012   if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
1013     _old_gen->sample_eden_chunk();
1014   }
1015   return res;
1016 }
1017 
1018 size_t DefNewGeneration::tlab_capacity() const {
1019   return eden()->capacity();
1020 }
1021 
1022 size_t DefNewGeneration::tlab_used() const {
1023   return eden()->used();
1024 }
1025 
1026 size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
1027   return unsafe_max_alloc_nogc();
1028 }