1 #ifdef USE_PRAGMA_IDENT_SRC
   2 #pragma ident "@(#)defNewGeneration.cpp 1.73 07/05/22 17:24:57 JVM"
   3 #endif
   4 /*
   5  * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
   6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   7  *
   8  * This code is free software; you can redistribute it and/or modify it
   9  * under the terms of the GNU General Public License version 2 only, as
  10  * published by the Free Software Foundation.
  11  *
  12  * This code is distributed in the hope that it will be useful, but WITHOUT
  13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  15  * version 2 for more details (a copy is included in the LICENSE file that
  16  * accompanied this code).
  17  *
  18  * You should have received a copy of the GNU General Public License version
  19  * 2 along with this work; if not, write to the Free Software Foundation,
  20  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  21  *
  22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  23  * CA 95054 USA or visit www.sun.com if you need additional information or
  24  * have any questions.
  25  *  
  26  */
  27 
  28 # include "incls/_precompiled.incl"
  29 # include "incls/_defNewGeneration.cpp.incl"
  30 
  31 //
  32 // DefNewGeneration functions.
  33 
  34 // Methods of protected closure types.
  35 
  36 DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
  37   assert(g->level() == 0, "Optimized for youngest gen.");
  38 }
  39 void DefNewGeneration::IsAliveClosure::do_object(oop p) {
  40   assert(false, "Do not call.");
  41 }
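     // Note: an oop is considered "alive" here if it lies at or above the end of
     // the young generation's reserved space (i.e., it is in an older generation)
     // or if it has already been forwarded (copied) during the current scavenge.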
  42 bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
  43   return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
  44 }
  45 
  46 DefNewGeneration::KeepAliveClosure::
  47 KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
  48   GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
  49   assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind.");
  50   _rs = (CardTableRS*)rs;
  51 }
  52 
  53 void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
  54 void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
  55 
  56 
  57 DefNewGeneration::FastKeepAliveClosure::
  58 FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  59   DefNewGeneration::KeepAliveClosure(cl) {
  60   _boundary = g->reserved().end();
  61 }
  62 
  63 void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
  64 void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
  65 
  66 DefNewGeneration::EvacuateFollowersClosure::
  67 EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
  68                          ScanClosure* cur, ScanClosure* older) :
  69   _gch(gch), _level(level),
  70   _scan_cur_or_nonheap(cur), _scan_older(older)
  71 {}
  72 
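     // Evacuation is transitive: copying an object into to-space (or promoting
     // it) can expose further references to young-generation objects.  The loop
     // below re-scans everything allocated since the last save_marks() and
     // repeats until no new allocations have occurred.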
  73 void DefNewGeneration::EvacuateFollowersClosure::do_void() {
  74   do {
  75     _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
  76                                        _scan_older);
  77   } while (!_gch->no_allocs_since_save_marks(_level));
  78 }
  79 
  80 DefNewGeneration::FastEvacuateFollowersClosure::
  81 FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
  82                              DefNewGeneration* gen,
  83                              FastScanClosure* cur, FastScanClosure* older) :
  84   _gch(gch), _level(level), _gen(gen),
  85   _scan_cur_or_nonheap(cur), _scan_older(older)
  86 {}
  87 
  88 void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
  89   do {
  90     _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
  91                                        _scan_older);
  92   } while (!_gch->no_allocs_since_save_marks(_level));
  93   guarantee(_gen->promo_failure_scan_stack() == NULL
  94             || _gen->promo_failure_scan_stack()->length() == 0,
  95             "Failed to finish scan");
  96 }
  97 
  98 ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) : 
  99   OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
 100 {
 101   assert(_g->level() == 0, "Optimized for youngest generation");
 102   _boundary = _g->reserved().end();
 103 }
 104 
 105 void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
 106 void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }
 107 
 108 FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
 109   OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
 110 {
 111   assert(_g->level() == 0, "Optimized for youngest generation");
 112   _boundary = _g->reserved().end();
 113 }
 114 
 115 void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
 116 void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }
 117 
 118 ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
 119   OopClosure(g->ref_processor()), _g(g)
 120 {
 121   assert(_g->level() == 0, "Optimized for youngest generation");
 122   _boundary = _g->reserved().end();
 123 }
 124 
 125 void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
 126 void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }
 127 
 128 void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
 129 void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }
 130 
 131 DefNewGeneration::DefNewGeneration(ReservedSpace rs,
 132                                    size_t initial_size,
 133                                    int level,
 134                                    const char* policy)
 135   : Generation(rs, initial_size, level),
 136     _objs_with_preserved_marks(NULL), 
 137     _preserved_marks_of_objs(NULL), 
 138     _promo_failure_scan_stack(NULL),
 139     _promo_failure_drain_in_progress(false),
 140     _should_allocate_from_space(false)
 141 {
 142   MemRegion cmr((HeapWord*)_virtual_space.low(),
 143                 (HeapWord*)_virtual_space.high());
 144   Universe::heap()->barrier_set()->resize_covered_region(cmr);
 145 
 146   if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
 147     _eden_space = new ConcEdenSpace(this);
 148   } else {
 149     _eden_space = new EdenSpace(this);
 150   }
 151   _from_space = new ContiguousSpace();
 152   _to_space   = new ContiguousSpace();
 153 
 154   if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
 155     vm_exit_during_initialization("Could not allocate a new gen space");
 156 
 157   // Compute the maximum eden and survivor space sizes. These sizes
 158   // are computed assuming the entire reserved space is committed.
 159   // These values are exported as performance counters.
 160   uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
 161   uintx size = _virtual_space.reserved_size();
 162   _max_survivor_size = compute_survivor_size(size, alignment);
 163   _max_eden_size = size - (2*_max_survivor_size);
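       // Illustrative example (arbitrary numbers): with a 64M reserved new
       // generation and 8M survivor spaces, _max_eden_size would be
       // 64M - 2*8M = 48M.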
 164 
 165   // allocate the performance counters
 166 
 167   // Generation counters -- generation 0, 3 subspaces
 168   _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
 169   _gc_counters = new CollectorCounters(policy, 0);
 170 
 171   _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
 172                                       _gen_counters);
 173   _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
 174                                       _gen_counters);
 175   _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
 176                                     _gen_counters);
 177 
 178   compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
 179   update_counters();
 180   _next_gen = NULL;
 181   _tenuring_threshold = MaxTenuringThreshold;
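       // PretenureSizeThreshold is specified in bytes; shifting right by
       // LogHeapWordSize converts it to a size in heap words.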
 182   _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
 183 }
 184 
 185 void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
 186                                                 bool clear_space,
 187                                                 bool mangle_space) {
 188   uintx alignment =
 189     GenCollectedHeap::heap()->collector_policy()->min_alignment();
 190 
 191   // If the spaces are being cleared (only done at heap initialization
 192   // currently), the survivor spaces need not be empty.
 193   // Otherwise, no care is taken for used areas in the survivor spaces,
 194   // so check that they are empty.
 195   assert(clear_space || (to()->is_empty() && from()->is_empty()),
 196     "Initialization of the survivor spaces assumes these are empty");
 197 
 198   // Compute sizes
 199   uintx size = _virtual_space.committed_size();
 200   uintx survivor_size = compute_survivor_size(size, alignment);
 201   uintx eden_size = size - (2*survivor_size);
 202   assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
 203 
 204   if (eden_size < minimum_eden_size) {
 205     // This may happen due to 64K rounding; if so, adjust the eden size back up.
 206     minimum_eden_size = align_size_up(minimum_eden_size, alignment);
 207     uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
 208     uintx unaligned_survivor_size = 
 209       align_size_down(maximum_survivor_size, alignment);
 210     survivor_size = MAX2(unaligned_survivor_size, alignment);
 211     eden_size = size - (2*survivor_size);
 212     assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
 213     assert(eden_size >= minimum_eden_size, "just checking");
 214   }
 215 
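       // The committed region is carved into three contiguous spaces, laid out
       // from the low end of the virtual space as [ eden | from | to ], with the
       // two survivor spaces each survivor_size bytes long.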
 216   char *eden_start = _virtual_space.low();
 217   char *from_start = eden_start + eden_size;
 218   char *to_start   = from_start + survivor_size;
 219   char *to_end     = to_start   + survivor_size;
 220 
 221   assert(to_end == _virtual_space.high(), "just checking");
 222   assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
 223   assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
 224   assert(Space::is_aligned((HeapWord*)to_start),   "checking alignment");
 225 
 226   MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
 227   MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
 228   MemRegion toMR  ((HeapWord*)to_start, (HeapWord*)to_end);
 229 
 230   // A minimum eden size implies that there is a part of eden that
 231   // is being used and that affects the initialization of any
 232   // newly formed eden.
 233   bool live_in_eden = minimum_eden_size > 0;
 234 
 235   // If not clearing the spaces, do some checking to verify that
 236   // the spaces are already mangled.
 237   if (!clear_space) {
 238     // Must check mangling before the spaces are reshaped.  Otherwise,
 239     // the bottom or end of one space may have moved into another, and
 240     // a failure of the check may not correctly indicate which space
 241     // is not properly mangled.
 242     if (ZapUnusedHeapArea) {
 243       HeapWord* limit = (HeapWord*) _virtual_space.high();
 244       eden()->check_mangled_unused_area(limit);
 245       from()->check_mangled_unused_area(limit);
 246         to()->check_mangled_unused_area(limit);
 247     }
 248   }
 249 
 250   // Reset the spaces for their new regions.
 251   eden()->initialize(edenMR,
 252                      clear_space && !live_in_eden,
 253                      SpaceDecorator::Mangle);
 254   // If clear_space and live_in_eden, we will not have cleared any
 255   // portion of eden above its top. This can cause newly
 256   // expanded space not to be mangled if using ZapUnusedHeapArea.
 257   // We explicitly do such mangling here.
 258   if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
 259     eden()->mangle_unused_area();
 260   }
 261   from()->initialize(fromMR, clear_space, mangle_space);
 262   to()->initialize(toMR, clear_space, mangle_space);
 263 
 264   // Set next compaction spaces.
 265   eden()->set_next_compaction_space(from());
 266   // The to-space is normally empty before a compaction so need
 267   // not be considered.  The exception is during promotion
 268   // failure handling when to-space can contain live objects.
 269   from()->set_next_compaction_space(NULL);
 270 }
 271 
 272 void DefNewGeneration::swap_spaces() {
 273   ContiguousSpace* s = from();
 274   _from_space        = to();
 275   _to_space          = s;
 276   eden()->set_next_compaction_space(from());
 277   // The to-space is normally empty before a compaction so need
 278   // not be considered.  The exception is during promotion
 279   // failure handling when to-space can contain live objects.
 280   from()->set_next_compaction_space(NULL);
 281 
 282   if (UsePerfData) {
 283     CSpaceCounters* c = _from_counters;
 284     _from_counters = _to_counters;
 285     _to_counters = c;
 286   }
 287 }
 288 
 289 bool DefNewGeneration::expand(size_t bytes) {
 290   MutexLocker x(ExpandHeap_lock);
 291   HeapWord* prev_high = (HeapWord*) _virtual_space.high();
 292   bool success = _virtual_space.expand_by(bytes);
 293   if (success && ZapUnusedHeapArea) {
 294     // Mangle newly committed space immediately because it
 295     // can be done here more simply than after the new
 296     // spaces have been computed.
 297     HeapWord* new_high = (HeapWord*) _virtual_space.high();
 298     MemRegion mangle_region(prev_high, new_high);
 299     SpaceMangler::mangle_region(mangle_region);
 300   }
 301 
 302   // Do not attempt an expand-to-the-reserve size.  The
 303   // request should properly observe the maximum size of
 304   // the generation, so an expand-to-reserve should be
 305   // unnecessary.  Also, a second expand-to-reserve call
 306   // could potentially cause an undue expansion; for
 307   // example, the first expand might fail for unknown reasons
 308   // while the second succeeds and expands the heap to its
 309   // maximum value.
 310   if (GC_locker::is_active()) {
 311     if (PrintGC && Verbose) {
 312       gclog_or_tty->print_cr("Garbage collection disabled, "
 313         "expanded heap instead");
 314     }
 315   }
 316 
 317   return success;
 318 }
 319 
 320 
 321 void DefNewGeneration::compute_new_size() {
 322   // This is called after a GC that includes the next older generation
 323   // (which is required to exist).  So from-space will normally be empty.
 324   // Note that we check both spaces, since if a scavenge failed they revert roles.
 325   // If neither space is empty, we bail out (otherwise we would have to relocate the objects).
 326   if (!from()->is_empty() || !to()->is_empty()) {
 327     return;
 328   }
 329 
 330   int next_level = level() + 1;
 331   GenCollectedHeap* gch = GenCollectedHeap::heap();
 332   assert(next_level < gch->_n_gens,
 333          "DefNewGeneration cannot be an oldest gen");
 334     
 335   Generation* next_gen = gch->_gens[next_level];
 336   size_t old_size = next_gen->capacity();
 337   size_t new_size_before = _virtual_space.committed_size();
 338   size_t min_new_size = spec()->init_size();
 339   size_t max_new_size = reserved().byte_size();
 340   assert(min_new_size <= new_size_before &&
 341          new_size_before <= max_new_size,
 342          "just checking");
 343   // All space sizes must be multiples of Generation::GenGrain.
 344   size_t alignment = Generation::GenGrain;
 345 
 346   // Compute desired new generation size based on NewRatio and
 347   // NewSizeThreadIncrease
 348   size_t desired_new_size = old_size/NewRatio;
 349   int threads_count = Threads::number_of_non_daemon_threads();
 350   size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
 351   desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);
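       // Illustrative example (arbitrary values): with a 192M old generation and
       // NewRatio == 3 the base desired size is 64M; 10 non-daemon threads with
       // NewSizeThreadIncrease == 16K would add another 160K before alignment.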
 352 
 353   // Adjust new generation size
 354   desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
 355   assert(desired_new_size <= max_new_size, "just checking");
 356 
 357   bool changed = false;
 358   if (desired_new_size > new_size_before) {
 359     size_t change = desired_new_size - new_size_before;
 360     assert(change % alignment == 0, "just checking");
 361     if (expand(change)) {
 362        changed = true;
 363     }
 364     // If the heap failed to expand to the desired size,
 365     // "changed" will be false.  If the expansion failed
 366     // (and at this point it was expected to succeed), 
 367     // ignore the failure (leaving "changed" as false).
 368   }
 369   if (desired_new_size < new_size_before && eden()->is_empty()) {
 370     // bail out of shrinking if objects in eden
 371     size_t change = new_size_before - desired_new_size;
 372     assert(change % alignment == 0, "just checking");
 373     _virtual_space.shrink_by(change);
 374     changed = true;
 375   }
 376   if (changed) {
 377     // The spaces have already been mangled at this point but
 378     // may not have been cleared (set top = bottom) and should be.
 379     // Mangling was done when the heap was being expanded.
 380     compute_space_boundaries(eden()->used(),
 381                              SpaceDecorator::Clear,
 382                              SpaceDecorator::DontMangle);
 383     MemRegion cmr((HeapWord*)_virtual_space.low(),
 384                   (HeapWord*)_virtual_space.high());
 385     Universe::heap()->barrier_set()->resize_covered_region(cmr);
 386     if (Verbose && PrintGC) {
 387       size_t new_size_after  = _virtual_space.committed_size();
 388       size_t eden_size_after = eden()->capacity();
 389       size_t survivor_size_after = from()->capacity();
 390       gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
 391         SIZE_FORMAT "K [eden="
 392         SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
 393         new_size_before/K, new_size_after/K,
 394         eden_size_after/K, survivor_size_after/K);
 395       if (WizardMode) {
 396         gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]", 
 397           thread_increase_size/K, threads_count);
 398       }
 399       gclog_or_tty->cr();
 400     }
 401   }
 402 }
 403 
 404 void DefNewGeneration::object_iterate_since_last_GC(ObjectClosure* cl) {
 405   // $$$ This may be wrong in case of "scavenge failure"?
 406   eden()->object_iterate(cl);
 407 }
 408 
 409 void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
 410   assert(false, "NYI -- are you sure you want to call this?");
 411 }
 412 
 413 
 414 size_t DefNewGeneration::capacity() const {
 415   return eden()->capacity()
 416        + from()->capacity();  // to() is only used during scavenge
 417 }
 418 
 419 
 420 size_t DefNewGeneration::used() const {
 421   return eden()->used()
 422        + from()->used();      // to() is only used during scavenge
 423 }
 424 
 425 
 426 size_t DefNewGeneration::free() const {
 427   return eden()->free()
 428        + from()->free();      // to() is only used during scavenge
 429 }
 430 
 431 size_t DefNewGeneration::max_capacity() const {
 432   const size_t alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
 433   const size_t reserved_bytes = reserved().byte_size();
 434   return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
 435 }
 436 
 437 size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
 438   return eden()->free();
 439 }
 440 
 441 size_t DefNewGeneration::capacity_before_gc() const {
 442   return eden()->capacity();
 443 }
 444 
 445 size_t DefNewGeneration::contiguous_available() const {
 446   return eden()->free();
 447 }
 448 
 449 
 450 HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
 451 HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }
 452 
 453 void DefNewGeneration::object_iterate(ObjectClosure* blk) {
 454   eden()->object_iterate(blk);
 455   from()->object_iterate(blk);
 456 }
 457 
 458 
 459 void DefNewGeneration::space_iterate(SpaceClosure* blk,
 460                                      bool usedOnly) {
 461   blk->do_space(eden());
 462   blk->do_space(from());
 463   blk->do_space(to());
 464 }
 465 
 466 // The last collection bailed out; we are running out of heap space,
 467 // so we try to allocate from from-space, too.
 468 HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
 469   HeapWord* result = NULL;
 470   if (PrintGC && Verbose) {
 471     gclog_or_tty->print("DefNewGeneration::allocate_from_space(%u):"
 472                   "  will_fail: %s"
 473                   "  heap_lock: %s"
 474                   "  free: " SIZE_FORMAT,
 475                   size,
 476                GenCollectedHeap::heap()->incremental_collection_will_fail() ? "true" : "false",
 477                Heap_lock->is_locked() ? "locked" : "unlocked",
 478                from()->free());
 479     }
 480   if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) {
 481     if (Heap_lock->owned_by_self() ||
 482         (SafepointSynchronize::is_at_safepoint() &&
 483          Thread::current()->is_VM_thread())) {
 484       // If the Heap_lock is not locked by this thread, this will be called 
 485       // again later with the Heap_lock held.
 486       result = from()->allocate(size);
 487     } else if (PrintGC && Verbose) {
 488       gclog_or_tty->print_cr("  Heap_lock is not owned by self");
 489     }
 490   } else if (PrintGC && Verbose) {
 491     gclog_or_tty->print_cr("  should_allocate_from_space: NOT");
 492   }
 493   if (PrintGC && Verbose) {
 494     gclog_or_tty->print_cr("  returns %s", result == NULL ? "NULL" : "object");
 495   }
 496   return result;
 497 }
 498 
 499 HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
 500                                                 bool   is_tlab,
 501                                                 bool   parallel) {
 502   // We don't attempt to expand the young generation (but perhaps we should.)
 503   return allocate(size, is_tlab);
 504 }
 505 
 506 
 507 void DefNewGeneration::collect(bool   full,
 508                                bool   clear_all_soft_refs,
 509                                size_t size,
 510                                bool   is_tlab) {
 511   assert(full || size > 0, "otherwise we don't want to collect");
 512   GenCollectedHeap* gch = GenCollectedHeap::heap();
 513   _next_gen = gch->next_gen(this);
 514   assert(_next_gen != NULL, 
 515     "This must be the youngest gen, and not the only gen");
 516 
 517   // If the next generation is too full to accommodate promotion
 518   // from this generation, pass on collection; let the next generation
 519   // do it.
 520   if (!collection_attempt_is_safe()) {
 521     gch->set_incremental_collection_will_fail();
 522     return;
 523   }
 524   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
 525 
 526   init_assuming_no_promotion_failure();
 527 
 528   TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
 529   // Capture heap used before collection (for printing).
 530   size_t gch_prev_used = gch->used();
 531 
 532   SpecializationStats::clear();
 533 
 534   // These can be shared for all code paths
 535   IsAliveClosure is_alive(this);
 536   ScanWeakRefClosure scan_weak_ref(this);
 537 
 538   age_table()->clear();
 539   to()->clear(SpaceDecorator::Mangle);
 540 
 541   gch->rem_set()->prepare_for_younger_refs_iterate(false);
 542 
 543   assert(gch->no_allocs_since_save_marks(0), 
 544          "save marks have not been newly set.");
 545 
 546   // Not very pretty.
 547   CollectorPolicy* cp = gch->collector_policy();
 548 
 549   FastScanClosure fsc_with_no_gc_barrier(this, false);
 550   FastScanClosure fsc_with_gc_barrier(this, true);
 551 
 552   set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
 553   FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
 554                                                   &fsc_with_no_gc_barrier,
 555                                                   &fsc_with_gc_barrier);
 556 
 557   assert(gch->no_allocs_since_save_marks(0),
 558          "save marks have not been newly set.");
 559 
 560   gch->gen_process_strong_roots(_level,
 561                                 true, // Process younger gens, if any, as
 562                                       // strong roots.
 563                                 false,// not collecting permanent generation.
 564                                 SharedHeap::SO_AllClasses,
 565                                 &fsc_with_gc_barrier,
 566                                 &fsc_with_no_gc_barrier);
 567 
 568   // "evacuate followers".
 569   evacuate_followers.do_void();
 570 
 571   FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
 572   ReferenceProcessor* rp = ref_processor();
 573   rp->setup_policy(clear_all_soft_refs);
 574   rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
 575                                     NULL);
 576   if (!promotion_failed()) {
 577     // Swap the survivor spaces.
 578     eden()->clear(SpaceDecorator::Mangle);
 579     from()->clear(SpaceDecorator::Mangle);
 580     if (ZapUnusedHeapArea) {
 581       // This is now done here because of the piece-meal mangling which
 582       // can check for valid mangling at intermediate points in the
 583       // collection(s).  When a minor collection fails to collect
 584       // sufficient space, resizing of the young generation can occur
 585       // and redistribute the spaces in the young generation.  Mangle
 586       // here so that unzapped regions don't get distributed to
 587       // other spaces.
 588       to()->mangle_unused_area();
 589     }
 590     swap_spaces();
 591   
 592     assert(to()->is_empty(), "to space should be empty now");
 593 
 594     // Set the desired survivor size to half the real survivor space
 595     _tenuring_threshold =
 596       age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
 597 
 598     if (PrintGC && !PrintGCDetails) {
 599       gch->print_heap_change(gch_prev_used);
 600     }
 601   } else {
 602     assert(HandlePromotionFailure, 
 603       "Should not be here unless promotion failure handling is on");
 604     assert(_promo_failure_scan_stack != NULL && 
 605       _promo_failure_scan_stack->length() == 0, "post condition");
 606 
 607     // Deallocate the stack and its elements.
 608     delete _promo_failure_scan_stack;
 609     _promo_failure_scan_stack = NULL;
 610 
 611     remove_forwarding_pointers();
 612     if (PrintGCDetails) {
 613       gclog_or_tty->print(" (promotion failed)");
 614     }
 615     // Add to-space to the list of spaces to compact
 616     // when a promotion failure has occurred.  In that
 617     // case there can be live objects in to-space
 618     // as a result of a partial evacuation of eden
 619     // and from-space.
 620     swap_spaces();   // For the sake of uniformity wrt ParNewGeneration::collect().
 621     from()->set_next_compaction_space(to());
 622     gch->set_incremental_collection_will_fail();
 623 
 624     // Reset the PromotionFailureALot counters.
 625     NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
 626   }
 627   // set new iteration safe limit for the survivor spaces
 628   from()->set_concurrent_iteration_safe_limit(from()->top());
 629   to()->set_concurrent_iteration_safe_limit(to()->top());
 630   SpecializationStats::print();
 631   update_time_of_last_gc(os::javaTimeMillis());
 632 }
 633 
 634 class RemoveForwardPointerClosure: public ObjectClosure {
 635 public:
 636   void do_object(oop obj) {
 637     obj->init_mark();
 638   }
 639 };
 640 
 641 void DefNewGeneration::init_assuming_no_promotion_failure() {
 642   _promotion_failed = false;
 643   from()->set_next_compaction_space(NULL);
 644 }
 645 
 646 void DefNewGeneration::remove_forwarding_pointers() {
 647   RemoveForwardPointerClosure rspc;
 648   eden()->object_iterate(&rspc);
 649   from()->object_iterate(&rspc);
 650   // Now restore saved marks, if any.
 651   if (_objs_with_preserved_marks != NULL) {
 652     assert(_preserved_marks_of_objs != NULL, "Both or none.");
 653     assert(_objs_with_preserved_marks->length() ==
 654            _preserved_marks_of_objs->length(), "Both or none.");
 655     for (int i = 0; i < _objs_with_preserved_marks->length(); i++) {
 656       oop obj   = _objs_with_preserved_marks->at(i);
 657       markOop m = _preserved_marks_of_objs->at(i);
 658       obj->set_mark(m);
 659     }
 660     delete _objs_with_preserved_marks;
 661     delete _preserved_marks_of_objs;
 662     _objs_with_preserved_marks = NULL;
 663     _preserved_marks_of_objs = NULL;
 664   }
 665 }
 666 
 667 void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
 668   if (m->must_be_preserved_for_promotion_failure(obj)) {
 669     if (_objs_with_preserved_marks == NULL) {
 670       assert(_preserved_marks_of_objs == NULL, "Both or none.");
 671       _objs_with_preserved_marks = new (ResourceObj::C_HEAP) 
 672         GrowableArray<oop>(PreserveMarkStackSize, true);
 673       _preserved_marks_of_objs = new (ResourceObj::C_HEAP) 
 674         GrowableArray<markOop>(PreserveMarkStackSize, true);
 675     }
 676     _objs_with_preserved_marks->push(obj);
 677     _preserved_marks_of_objs->push(m);
 678   }
 679 }
 680 
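     // When promotion fails, the object is left in place and forwarded to
     // itself.  The self-forwarding pointer lets the rest of the scavenge treat
     // the object as already copied, while remove_forwarding_pointers() later
     // restores the original mark words saved by preserve_mark_if_necessary().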
 681 void DefNewGeneration::handle_promotion_failure(oop old) {
 682   preserve_mark_if_necessary(old, old->mark());
 683   // forward to self
 684   old->forward_to(old);
 685   _promotion_failed = true;
 686 
 687   push_on_promo_failure_scan_stack(old);
 688 
 689   if (!_promo_failure_drain_in_progress) {
 690     // prevent recursion in copy_to_survivor_space()
 691     _promo_failure_drain_in_progress = true;
 692     drain_promo_failure_scan_stack();
 693     _promo_failure_drain_in_progress = false;
 694   }
 695 }
 696 
 697 oop DefNewGeneration::copy_to_survivor_space(oop old) {
 698   assert(is_in_reserved(old) && !old->is_forwarded(),
 699          "shouldn't be scavenging this oop"); 
 700   size_t s = old->size();
 701   oop obj = NULL;
 702   
 703   // Try allocating obj in to-space (unless too old)
 704   if (old->age() < tenuring_threshold()) {
 705     obj = (oop) to()->allocate(s);
 706   }
 707 
 708   // Otherwise try allocating obj tenured
 709   if (obj == NULL) {
 710     obj = _next_gen->promote(old, s);
 711     if (obj == NULL) {
 712       if (!HandlePromotionFailure) {
 713         // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
 714         // is incorrectly set.  In any case, it's seriously wrong to be here!
 715         vm_exit_out_of_memory(s*wordSize, "promotion");
 716       }
 717 
 718       handle_promotion_failure(old);
 719       return old;
 720     }
 721   } else {
 722     // Prefetch beyond obj
 723     const intx interval = PrefetchCopyIntervalInBytes;
 724     Prefetch::write(obj, interval);
 725 
 726     // Copy obj
 727     Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);
 728 
 729     // Increment age if obj still in new generation
 730     obj->incr_age(); 
 731     age_table()->add(obj, s);
 732   }
 733 
 734   // Done, insert forward pointer to obj in this header
 735   old->forward_to(obj);
 736 
 737   return obj;
 738 }
 739 
 740 void DefNewGeneration::push_on_promo_failure_scan_stack(oop obj) {
 741   if (_promo_failure_scan_stack == NULL) {
 742     _promo_failure_scan_stack = new (ResourceObj::C_HEAP)
 743                                     GrowableArray<oop>(40, true);
 744   }
 745 
 746   _promo_failure_scan_stack->push(obj);
 747 }
 748 
 749 void DefNewGeneration::drain_promo_failure_scan_stack() {
 750   assert(_promo_failure_scan_stack != NULL, "precondition");
 751 
 752   while (_promo_failure_scan_stack->length() > 0) {
 753      oop obj = _promo_failure_scan_stack->pop();
 754      obj->oop_iterate(_promo_failure_scan_stack_closure);
 755   }
 756 }
 757 
 758 void DefNewGeneration::save_marks() { 
 759   eden()->set_saved_mark();
 760   to()->set_saved_mark();
 761   from()->set_saved_mark();
 762 }
 763 
 764 
 765 void DefNewGeneration::reset_saved_marks() { 
 766   eden()->reset_saved_mark();
 767   to()->reset_saved_mark();
 768   from()->reset_saved_mark();
 769 }
 770 
 771 
 772 bool DefNewGeneration::no_allocs_since_save_marks() {
 773   assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
 774   assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
 775   return to()->saved_mark_at_top();
 776 }
 777 
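     // The macro below generates, for each closure type, a specialized
     // oop_since_save_marks_iterate that applies the closure to every object
     // allocated into eden, to-space, or from-space since the last save_marks(),
     // and then resets the saved marks to the current tops.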
 778 #define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
 779                                                                 \
 780 void DefNewGeneration::                                         \
 781 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
 782   cl->set_generation(this);                                  \
 783   eden()->oop_since_save_marks_iterate##nv_suffix(cl);               \
 784   to()->oop_since_save_marks_iterate##nv_suffix(cl);         \
 785   from()->oop_since_save_marks_iterate##nv_suffix(cl);               \
 786   cl->reset_generation();                                    \
 787   save_marks();                                                 \
 788 }
 789 
 790 ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)
 791 
 792 #undef DefNew_SINCE_SAVE_MARKS_DEFN
 793 
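     // Offer the unused portion of to-space to an older generation as scratch
     // memory for its collection.  The free area is handed out as a single
     // ScratchBlock (at least MinFreeScratchWords in size), but only if no
     // promotion failure has left live objects in to-space.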
 794 void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
 795                                          size_t max_alloc_words) {
 796   if (requestor == this || _promotion_failed) return;
 797   assert(requestor->level() > level(), "DefNewGeneration must be youngest");
 798 
 799   /* $$$ Assert this?  "trace" is a "MarkSweep" function so that's not appropriate.
 800   if (to_space->top() > to_space->bottom()) {
 801     trace("to_space not empty when contribute_scratch called");
 802   }
 803   */
 804 
 805   ContiguousSpace* to_space = to();
 806   assert(to_space->end() >= to_space->top(), "pointers out of order");
 807   size_t free_words = pointer_delta(to_space->end(), to_space->top());
 808   if (free_words >= MinFreeScratchWords) {
 809     ScratchBlock* sb = (ScratchBlock*)to_space->top();
 810     sb->num_words = free_words;
 811     sb->next = list;
 812     list = sb;
 813   }
 814 }
 815 
 816 void DefNewGeneration::reset_scratch() {
 817   // If contributing scratch in to_space, mangle all of
 818   // to_space if ZapUnusedHeapArea.  This is needed because
 819   // top is not maintained while using to-space as scratch.
 820   if (ZapUnusedHeapArea) {
 821     to()->mangle_unused_area_complete();
 822   }
 823 }
 824 
 825 bool DefNewGeneration::collection_attempt_is_safe() {
 826   if (!to()->is_empty()) {
 827     return false;
 828   }
 829   if (_next_gen == NULL) {
 830     GenCollectedHeap* gch = GenCollectedHeap::heap();
 831     _next_gen = gch->next_gen(this);
 832     assert(_next_gen != NULL, 
 833            "This must be the youngest gen, and not the only gen");
 834   }
 835 
 836   // Decide if there's enough room for a full promotion
 837   // When using extremely large edens, we effectively lose a 
 838   // large amount of old space.  Use the "MaxLiveObjectEvacuationRatio" 
 839   // flag to reduce the minimum evacuation space requirements. If 
 840   // there is not enough space to evacuate eden during a scavenge, 
 841   // the VM will immediately exit with an out of memory error. 
 842   // This flag has not been tested
 843   // with collectors other than simple mark & sweep.
 844   //
 845   // Note that with the addition of promotion failure handling, the
 846   // VM will not immediately exit but will undo the young generation
 847   // collection.  The parameter is left here for compatibility.
 848   const double evacuation_ratio = MaxLiveObjectEvacuationRatio / 100.0;
 849 
 850   // worst_case_evacuation is based on "used()".  For the case where this
 851   // method is called after a collection, this is still appropriate because
 852   // the case that needs to be detected is one in which a full collection
 853   // has been done and has overflowed into the young generation.  In that
 854   // case a minor collection will fail (the overflow of the full collection
 855   // means there is no space in the old generation for any promotion).
 856   size_t worst_case_evacuation = (size_t)(used() * evacuation_ratio);
 857 
 858   return _next_gen->promotion_attempt_is_safe(worst_case_evacuation,
 859                                               HandlePromotionFailure);
 860 }
 861 
 862 void DefNewGeneration::gc_epilogue(bool full) {
 863   // Check if the heap is approaching full after a collection has
 864   // been done.  Generally, at a minimum, the young generation is
 865   // empty at the end of a collection.  If it is not, then
 866   // the heap is approaching full.
 867   GenCollectedHeap* gch = GenCollectedHeap::heap();
 868   clear_should_allocate_from_space();
 869   if (collection_attempt_is_safe()) {
 870     gch->clear_incremental_collection_will_fail();
 871   } else {
 872     gch->set_incremental_collection_will_fail();
 873     if (full) { // we seem to be running out of space
 874       set_should_allocate_from_space();
 875     }
 876   }
 877 
 878   if (ZapUnusedHeapArea) {
 879     eden()->check_mangled_unused_area_complete();
 880     from()->check_mangled_unused_area_complete();
 881     to()->check_mangled_unused_area_complete();
 882   }
 883 
 884   // update the generation and space performance counters
 885   update_counters();
 886   gch->collector_policy()->counters()->update_counters();
 887 }
 888 
 889 void DefNewGeneration::record_spaces_top() {
 890   assert(ZapUnusedHeapArea, "Not mangling unused space");
 891   eden()->set_top_for_allocations();
 892   to()->set_top_for_allocations();
 893   from()->set_top_for_allocations();
 894 }
 895 
 896 
 897 void DefNewGeneration::update_counters() {
 898   if (UsePerfData) {
 899     _eden_counters->update_all();
 900     _from_counters->update_all();
 901     _to_counters->update_all();
 902     _gen_counters->update_all();
 903   }
 904 }
 905 
 906 void DefNewGeneration::verify(bool allow_dirty) {
 907   eden()->verify(allow_dirty);
 908   from()->verify(allow_dirty);
 909     to()->verify(allow_dirty);
 910 }
 911 
 912 void DefNewGeneration::print_on(outputStream* st) const {
 913   Generation::print_on(st);
 914   st->print("  eden");
 915   eden()->print_on(st);
 916   st->print("  from");
 917   from()->print_on(st);
 918   st->print("  to  ");
 919   to()->print_on(st);
 920 }
 921 
 922 
 923 const char* DefNewGeneration::name() const {
 924   return "def new generation";
 925 }
 926 
 927 // Moved from inline file as they are not called inline
 928 CompactibleSpace* DefNewGeneration::first_compaction_space() const {
 929   return eden();
 930 }
 931 
 932 HeapWord* DefNewGeneration::allocate(size_t word_size,
 933                                      bool is_tlab) {
 934   // This is the slow-path allocation for the DefNewGeneration.
 935   // Most allocations are fast-path in compiled code.
 936   // We try to allocate from the eden.  If that works, we are happy.
 937   // Note that since DefNewGeneration supports lock-free allocation, we
 938   // have to use it here, as well.
 939   HeapWord* result = eden()->par_allocate(word_size);
 940   if (result != NULL) {
 941     return result;
 942   }
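       // Eden is full up to its current soft limit.  If the soft limit is below
       // the hard end(), notify the next generation via allocation_limit_reached();
       // it may return a new soft limit to install before retrying, or NULL once
       // the soft limit has been pushed out to the hard end().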
 943   do {
 944     HeapWord* old_limit = eden()->soft_end();
 945     if (old_limit < eden()->end()) {
 946       // Tell the next generation we reached a limit.
 947       HeapWord* new_limit =
 948         next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
 949       if (new_limit != NULL) {
 950         Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
 951       } else {
 952         assert(eden()->soft_end() == eden()->end(),
 953                "invalid state after allocation_limit_reached returned null");
 954       }
 955     } else {
 956       // The allocation failed and the soft limit is equal to the hard limit,
 957       // so there is no reason to attempt another allocation.
 958       assert(old_limit == eden()->end(), "sanity check");
 959       break;
 960     }
 961     // Try to allocate until it succeeds or the soft limit can't be adjusted.
 962     result = eden()->par_allocate(word_size);
 963   } while (result == NULL);
 964 
 965   // If the eden is full and the last collection bailed out, we are running
 966   // out of heap space, and we try to allocate the from-space, too.
 967   // allocate_from_space can't be inlined because that would introduce a
 968   // circular dependency at compile time.
 969   if (result == NULL) {
 970     result = allocate_from_space(word_size);
 971   }
 972   return result;
 973 }
 974 
 975 HeapWord* DefNewGeneration::par_allocate(size_t word_size,
 976                                          bool is_tlab) {
 977   return eden()->par_allocate(word_size);
 978 }
 979 
 980 void DefNewGeneration::gc_prologue(bool full) {
 981   // Ensure that _end and _soft_end are the same in eden space.
 982   eden()->set_soft_end(eden()->end());
 983 }
 984 
 985 size_t DefNewGeneration::tlab_capacity() const {
 986   return eden()->capacity();
 987 }
 988 
 989 size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
 990   return unsafe_max_alloc_nogc();
 991 }