/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_defNewGeneration.cpp.incl"

//
// DefNewGeneration functions.

// Methods of protected closure types.

DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
  assert(g->level() == 0, "Optimized for youngest gen.");
}
void DefNewGeneration::IsAliveClosure::do_object(oop p) {
  assert(false, "Do not call.");
}
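// A young-generation object is considered alive if it has already been
// forwarded (i.e. copied) during the current scavenge; anything outside this
// generation's reserved space is treated as alive unconditionally.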
bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
  return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
}

DefNewGeneration::KeepAliveClosure::
KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
  GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
  assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind.");
  _rs = (CardTableRS*)rs;
}

void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }


DefNewGeneration::FastKeepAliveClosure::
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {
  _boundary = g->reserved().end();
}

void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }

DefNewGeneration::EvacuateFollowersClosure::
EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                         ScanClosure* cur, ScanClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::EvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}

DefNewGeneration::FastEvacuateFollowersClosure::
FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                             DefNewGeneration* gen,
                             FastScanClosure* cur, FastScanClosure* older) :
  _gch(gch), _level(level), _gen(gen),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
  guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
}

ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
  OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}
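// _boundary is the end of the young generation's reserved space.  With the
// young generation at the low end of the heap, the scan closures use it as a
// cheap filter for references that may still point into this generation.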

void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }

FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
  OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }

ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
  OopClosure(g->ref_processor()), _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }

void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }

DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   int level,
                                   const char* policy)
  : Generation(rs, initial_size, level),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
    _eden_space = new ConcEdenSpace(this);
  } else {
    _eden_space = new EdenSpace(this);
  }
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
    vm_exit_during_initialization("Could not allocate a new gen space");

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, alignment);
  _max_eden_size = size - (2*_max_survivor_size);
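  // For example (an illustrative sketch only, assuming compute_survivor_size()
  // divides the given size by SurvivorRatio + 2 and that SurvivorRatio has its
  // default value of 8): a 64M reserved young generation gives roughly 6.4M
  // per survivor space and about 64M - 2*6.4M = 51M of maximum eden.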

  // allocate the performance counters

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _next_gen = NULL;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
}

void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  uintx alignment =
    GenCollectedHeap::heap()->collector_policy()->min_alignment();

  // If the spaces are being cleared (currently done only at heap
  // initialization), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces,
  // so check that they are empty.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
    "Initialization of the survivor spaces assumes these are empty");

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, alignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding; if so, adjust eden size back up.
    minimum_eden_size = align_size_up(minimum_eden_size, alignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_size_down(maximum_survivor_size, alignment);
    survivor_size = MAX2(unaligned_survivor_size, alignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;
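
  // The committed virtual space is carved up, from low to high addresses,
  // as [ eden | from | to ].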

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start, (HeapWord*)to_end);

  // A minimum eden size implies that there is a part of eden that
  // is being used and that affects the initialization of any
  // newly formed eden.
  bool live_in_eden = minimum_eden_size > 0;

  // If not clearing the spaces, do some checking to verify that
  // the spaces are already mangled.
  if (!clear_space) {
    // Must check mangling before the spaces are reshaped.  Otherwise,
    // the bottom or end of one space may have moved into another, and
    // a failure of the check may not correctly indicate which space
    // is not properly mangled.
    if (ZapUnusedHeapArea) {
      HeapWord* limit = (HeapWord*) _virtual_space.high();
      eden()->check_mangled_unused_area(limit);
      from()->check_mangled_unused_area(limit);
        to()->check_mangled_unused_area(limit);
    }
  }

  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
  // If clear_space and live_in_eden, we will not have cleared any
  // portion of eden above its top. This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, clear_space, mangle_space);
  to()->initialize(toMR, clear_space, mangle_space);

  // Set next compaction spaces.
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);
}

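// Exchange the roles of from-space and to-space.  After a successful scavenge
// the survivors live in what was to-space, so it becomes the new from-space
// (and the now-empty old from-space becomes the new to-space).  The matching
// performance counters are swapped as well.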
void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space        = to();
  _to_space          = s;
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}

bool DefNewGeneration::expand(size_t bytes) {
  MutexLocker x(ExpandHeap_lock);
  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
  bool success = _virtual_space.expand_by(bytes);
  if (success && ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    HeapWord* new_high = (HeapWord*) _virtual_space.high();
    MemRegion mangle_region(prev_high, new_high);
    SpaceMangler::mangle_region(mangle_region);
  }

  // Do not attempt an expand-to-the-reserve-size.  The
  // request should properly observe the maximum size of
  // the generation, so an expand-to-reserve should be
  // unnecessary.  Also, a second expand-to-reserve call
  // could potentially cause an undue expansion, for
  // example if the first expand fails for unknown reasons
  // but the second succeeds and expands the heap to its
  // maximum value.
  if (GC_locker::is_active()) {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Garbage collection disabled, "
        "expanded heap instead");
    }
  }

  return success;
}


void DefNewGeneration::compute_new_size() {
  // This is called after a gc that includes the following generation
  // (which is required to exist).  So from-space will normally be empty.
  // Note that we check both spaces, since if scavenge failed they revert
  // roles.  If either is non-empty we bail out (otherwise we would have to
  // relocate the objects).
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  int next_level = level() + 1;
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(next_level < gch->_n_gens,
         "DefNewGeneration cannot be an oldest gen");

  Generation* next_gen = gch->_gens[next_level];
  size_t old_size = next_gen->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = spec()->init_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  // Compute desired new generation size based on NewRatio and
  // NewSizeThreadIncrease
  size_t desired_new_size = old_size/NewRatio;
  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
  desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);
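  // For example (illustrative numbers only): with NewRatio = 2, an old
  // generation capacity of 192M, 10 non-daemon threads and
  // NewSizeThreadIncrease = 16K, this computes
  // align_size_up(96M + 10*16K, alignment).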

  // Adjust new generation size
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
       changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false.  If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // bail out of shrinking if objects in eden
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    Universe::heap()->barrier_set()->resize_covered_region(cmr);
    if (Verbose && PrintGC) {
      size_t new_size_after  = _virtual_space.committed_size();
      size_t eden_size_after = eden()->capacity();
      size_t survivor_size_after = from()->capacity();
      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
        SIZE_FORMAT "K [eden="
        SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, new_size_after/K,
        eden_size_after/K, survivor_size_after/K);
      if (WizardMode) {
        gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
          thread_increase_size/K, threads_count);
      }
      gclog_or_tty->cr();
    }
  }
}

void DefNewGeneration::object_iterate_since_last_GC(ObjectClosure* cl) {
  // $$$ This may be wrong in case of "scavenge failure"?
  eden()->object_iterate(cl);
}

void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
  assert(false, "NYI -- are you sure you want to call this?");
}


size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}


size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used();      // to() is only used during scavenge
}


size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}

size_t DefNewGeneration::max_capacity() const {
  const size_t alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
  const size_t reserved_bytes = reserved().byte_size();
  return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
}

size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}

size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}

size_t DefNewGeneration::contiguous_available() const {
  return eden()->free();
}


HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }

void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}


void DefNewGeneration::space_iterate(SpaceClosure* blk,
                                     bool usedOnly) {
  blk->do_space(eden());
  blk->do_space(from());
  blk->do_space(to());
}

// The last collection bailed out, we are running out of heap space,
// so we try to allocate from the from-space, too.
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
  HeapWord* result = NULL;
  if (PrintGC && Verbose) {
    gclog_or_tty->print("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "):"
                  "  will_fail: %s"
                  "  heap_lock: %s"
                  "  free: " SIZE_FORMAT,
                  size,
               GenCollectedHeap::heap()->incremental_collection_will_fail() ? "true" : "false",
               Heap_lock->is_locked() ? "locked" : "unlocked",
               from()->free());
  }
  if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) {
    if (Heap_lock->owned_by_self() ||
        (SafepointSynchronize::is_at_safepoint() &&
         Thread::current()->is_VM_thread())) {
      // If the Heap_lock is not locked by this thread, this will be called
      // again later with the Heap_lock held.
      result = from()->allocate(size);
    } else if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  Heap_lock is not owned by self");
    }
  } else if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("  should_allocate_from_space: NOT");
  }
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("  returns %s", result == NULL ? "NULL" : "object");
  }
  return result;
}

HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
                                                bool   is_tlab,
                                                bool   parallel) {
  // We don't attempt to expand the young generation (but perhaps we should.)
  return allocate(size, is_tlab);
}

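// A single scavenge of the young generation: copy live objects reachable from
// the roots (including old-to-young references recorded by the remembered
// set) into to-space, promoting them into the next generation when they are
// old enough or to-space fills up; then process discovered references, and
// finally either swap the survivor spaces on success or undo the copy when a
// promotion failure occurred.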
void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  _next_gen = gch->next_gen(this);
  assert(_next_gen != NULL,
    "This must be the youngest gen, and not the only gen");

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_will_fail();
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  gch->gen_process_strong_roots(_level,
                                true,  // Process younger gens, if any,
                                       // as strong roots.
                                true,  // activate StrongRootsScope
                                false, // not collecting perm generation.
                                SharedHeap::SO_AllClasses,
                                &fsc_with_no_gc_barrier,
                                true,   // walk *all* scavengable nmethods
                                &fsc_with_gc_barrier);

  // "evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                    NULL);
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a minor collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    // Set the desired survivor size to half the real survivor space
    _tenuring_threshold =
      age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
    size_policy->reset_gc_overhead_limit_count();
    if (PrintGC && !PrintGCDetails) {
      gch->print_heap_change(gch_prev_used);
    }
  } else {
    assert(HandlePromotionFailure,
      "Should not be here unless promotion failure handling is on");
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed) ");
    }
    // Add to-space to the list of spaces to compact
    // when a promotion failure has occurred.  In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_will_fail();

    // Inform the next generation that a promotion failure occurred.
    _next_gen->promotion_failure_occurred();

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());
  SpecializationStats::print();
  update_time_of_last_gc(os::javaTimeMillis());
}

class RemoveForwardPointerClosure: public ObjectClosure {
public:
  void do_object(oop obj) {
    obj->init_mark();
  }
};

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::remove_forwarding_pointers() {
  RemoveForwardPointerClosure rspc;
  eden()->object_iterate(&rspc);
  from()->object_iterate(&rspc);

  // Now restore saved marks, if any.
  assert(_objs_with_preserved_marks.size() == _preserved_marks_of_objs.size(),
         "should be the same");
  while (!_objs_with_preserved_marks.is_empty()) {
    oop obj   = _objs_with_preserved_marks.pop();
    markOop m = _preserved_marks_of_objs.pop();
    obj->set_mark(m);
  }
  _objs_with_preserved_marks.clear(true);
  _preserved_marks_of_objs.clear(true);
}

void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    _objs_with_preserved_marks.push(obj);
    _preserved_marks_of_objs.push(m);
  }
}

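// When an object could not be promoted it is "forwarded to itself": it stays
// where it is, its original mark word is preserved if necessary, and the
// whole scavenge is later undone via remove_forwarding_pointers().  The scan
// stack is drained here (rather than recursively) to bound the depth of
// copy_to_survivor_space() calls.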
void DefNewGeneration::handle_promotion_failure(oop old) {
  preserve_mark_if_necessary(old, old->mark());
  if (!_promotion_failed && PrintPromotionFailure) {
    gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ",
                        old->size());
  }

  // forward to self
  old->forward_to(old);
  _promotion_failed = true;

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}

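// Copy a live object out of eden or from-space: allocate it in to-space while
// it is younger than the tenuring threshold, otherwise (or when to-space is
// full) promote it into the next generation.  The forwarding pointer left in
// the old copy's mark word is what IsAliveClosure and the scan closures use
// for the rest of the scavenge.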
oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _next_gen->promote(old, s);
    if (obj == NULL) {
      if (!HandlePromotionFailure) {
        // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
        // is incorrectly set. In any case, it's seriously wrong to be here!
        vm_exit_out_of_memory(s*wordSize, "promotion");
      }

      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in the header of old
  old->forward_to(obj);

  return obj;
}

void DefNewGeneration::drain_promo_failure_scan_stack() {
  while (!_promo_failure_scan_stack.is_empty()) {
     oop obj = _promo_failure_scan_stack.pop();
     obj->oop_iterate(_promo_failure_scan_stack_closure);
  }
}

void DefNewGeneration::save_marks() {
  eden()->set_saved_mark();
  to()->set_saved_mark();
  from()->set_saved_mark();
}


void DefNewGeneration::reset_saved_marks() {
  eden()->reset_saved_mark();
  to()->reset_saved_mark();
  from()->reset_saved_mark();
}


bool DefNewGeneration::no_allocs_since_save_marks() {
  assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
  assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
  return to()->saved_mark_at_top();
}

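// The macro below expands into one oop_since_save_marks_iterate_* method per
// closure type in ALL_SINCE_SAVE_MARKS_CLOSURES; the nv_suffix gives each
// closure a statically bound (non-virtual) do_oop call.  Each expansion scans
// the objects allocated in eden, to-space and from-space since the last
// save_marks() and then resets the saved marks to the current tops.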
#define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
                                                                \
void DefNewGeneration::                                         \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
  cl->set_generation(this);                                     \
  eden()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  to()->oop_since_save_marks_iterate##nv_suffix(cl);            \
  from()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  cl->reset_generation();                                       \
  save_marks();                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)

#undef DefNew_SINCE_SAVE_MARKS_DEFN

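// Offer the unused tail of to-space as scratch memory to an older
// generation's collection.  Nothing is contributed if this generation is the
// requestor, if a promotion failure left live objects in to-space, or if the
// free tail is smaller than MinFreeScratchWords.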
void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                         size_t max_alloc_words) {
  if (requestor == this || _promotion_failed) return;
  assert(requestor->level() > level(), "DefNewGeneration must be youngest");

  /* $$$ Assert this?  "trace" is a "MarkSweep" function so that's not appropriate.
  if (to_space->top() > to_space->bottom()) {
    trace("to_space not empty when contribute_scratch called");
  }
  */

  ContiguousSpace* to_space = to();
  assert(to_space->end() >= to_space->top(), "pointers out of order");
  size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    ScratchBlock* sb = (ScratchBlock*)to_space->top();
    sb->num_words = free_words;
    sb->next = list;
    list = sb;
  }
}

void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of
  // to_space if ZapUnusedHeapArea.  This is needed because
  // top is not maintained while using to-space as scratch.
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area_complete();
  }
}

bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    return false;
  }
  if (_next_gen == NULL) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    _next_gen = gch->next_gen(this);
    assert(_next_gen != NULL,
           "This must be the youngest gen, and not the only gen");
  }

  // Decide if there's enough room for a full promotion
  // When using extremely large edens, we effectively lose a
  // large amount of old space.  Use the "MaxLiveObjectEvacuationRatio"
  // flag to reduce the minimum evacuation space requirements. If
  // there is not enough space to evacuate eden during a scavenge,
  // the VM will immediately exit with an out of memory error.
  // This flag has not been tested
  // with collectors other than simple mark & sweep.
  //
  // Note that with the addition of promotion failure handling, the
  // VM will not immediately exit but will undo the young generation
  // collection.  The parameter is left here for compatibility.
  const double evacuation_ratio = MaxLiveObjectEvacuationRatio / 100.0;

  // worst_case_evacuation is based on "used()".  For the case where this
  // method is called after a collection, this is still appropriate because
  // the case that needs to be detected is one in which a full collection
  // has been done and has overflowed into the young generation.  In that
  // case a minor collection will fail (the overflow of the full collection
  // means there is no space in the old generation for any promotion).
  size_t worst_case_evacuation = (size_t)(used() * evacuation_ratio);

  return _next_gen->promotion_attempt_is_safe(worst_case_evacuation,
                                              HandlePromotionFailure);
}

void DefNewGeneration::gc_epilogue(bool full) {
  // Check if the heap is approaching full after a collection has
  // been done.  Generally the young generation is empty at
  // the end of a collection.  If it is not, then the heap is
  // approaching full.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  clear_should_allocate_from_space();
  if (collection_attempt_is_safe()) {
    gch->clear_incremental_collection_will_fail();
  } else {
    gch->set_incremental_collection_will_fail();
    if (full) { // we seem to be running out of space
      set_should_allocate_from_space();
    }
  }

  if (ZapUnusedHeapArea) {
    eden()->check_mangled_unused_area_complete();
    from()->check_mangled_unused_area_complete();
    to()->check_mangled_unused_area_complete();
  }

  // update the generation and space performance counters
  update_counters();
  gch->collector_policy()->counters()->update_counters();
}

void DefNewGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  eden()->set_top_for_allocations();
  to()->set_top_for_allocations();
  from()->set_top_for_allocations();
}


void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}

void DefNewGeneration::verify(bool allow_dirty) {
  eden()->verify(allow_dirty);
  from()->verify(allow_dirty);
    to()->verify(allow_dirty);
}

void DefNewGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print("  eden");
  eden()->print_on(st);
  st->print("  from");
  from()->print_on(st);
  st->print("  to  ");
  to()->print_on(st);
}


const char* DefNewGeneration::name() const {
  return "def new generation";
}

// Moved from inline file as they are not called inline
CompactibleSpace* DefNewGeneration::first_compaction_space() const {
  return eden();
}

HeapWord* DefNewGeneration::allocate(size_t word_size,
                                     bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden.  If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result != NULL) {
    return result;
  }
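  // Eden is (softly) full.  When eden is a ConcEdenSpace its soft end may lie
  // below the hard end; on hitting it we give the next generation a chance to
  // react (allocation_limit_reached) and possibly advance the soft limit,
  // then retry.  Once the soft limit has reached the hard limit, eden really
  // is full and we give up on eden allocation.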
  do {
    HeapWord* old_limit = eden()->soft_end();
    if (old_limit < eden()->end()) {
      // Tell the next generation we reached a limit.
      HeapWord* new_limit =
        next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
      if (new_limit != NULL) {
        Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
      } else {
        assert(eden()->soft_end() == eden()->end(),
               "invalid state after allocation_limit_reached returned null");
      }
    } else {
      // The allocation failed and the soft limit is equal to the hard limit,
      // so there is no reason to attempt another allocation.
      assert(old_limit == eden()->end(), "sanity check");
      break;
    }
    // Try to allocate until succeeded or the soft limit can't be adjusted
    result = eden()->par_allocate(word_size);
  } while (result == NULL);

  // If the eden is full and the last collection bailed out, we are running
  // out of heap space, and we try to allocate from the from-space, too.
  // allocate_from_space can't be inlined because that would introduce a
  // circular dependency at compile time.
  if (result == NULL) {
    result = allocate_from_space(word_size);
  }
  return result;
}

HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                         bool is_tlab) {
  return eden()->par_allocate(word_size);
}

void DefNewGeneration::gc_prologue(bool full) {
  // Ensure that _end and _soft_end are the same in eden space.
  eden()->set_soft_end(eden()->end());
}

size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}
}