/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_defNewGeneration.cpp.incl"

//
// DefNewGeneration functions.

// Methods of protected closure types.

DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
  assert(g->level() == 0, "Optimized for youngest gen.");
}
void DefNewGeneration::IsAliveClosure::do_object(oop p) {
  assert(false, "Do not call.");
}
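// An oop is live if it lies outside this (youngest) generation's reserved
// range, or if it has already been forwarded during the current scavenge.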
bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
  return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
}

DefNewGeneration::KeepAliveClosure::
KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
  GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
  assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind.");
  _rs = (CardTableRS*)rs;
}

void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }


DefNewGeneration::FastKeepAliveClosure::
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {
  _boundary = g->reserved().end();
}

void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }

DefNewGeneration::EvacuateFollowersClosure::
EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                         ScanClosure* cur, ScanClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

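// Iterate to a fixed point: each pass over the objects copied since the
// last save_marks() may evacuate further objects, which are then scanned
// on the following pass.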
void DefNewGeneration::EvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}

DefNewGeneration::FastEvacuateFollowersClosure::
FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                             DefNewGeneration* gen,
                             FastScanClosure* cur, FastScanClosure* older) :
  _gch(gch), _level(level), _gen(gen),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
  guarantee(_gen->promo_failure_scan_stack() == NULL
            || _gen->promo_failure_scan_stack()->length() == 0,
            "Failed to finish scan");
}

ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
  OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }

FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
  OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }

ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
  OopClosure(g->ref_processor()), _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }

void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }

DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   int level,
                                   const char* policy)
  : Generation(rs, initial_size, level),
    _objs_with_preserved_marks(NULL),
    _preserved_marks_of_objs(NULL),
    _promo_failure_scan_stack(NULL),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
    _eden_space = new ConcEdenSpace(this);
  } else {
    _eden_space = new EdenSpace(this);
  }
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
    vm_exit_during_initialization("Could not allocate a new gen space");

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, alignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _next_gen = NULL;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
}

void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  uintx alignment =
    GenCollectedHeap::heap()->collector_policy()->min_alignment();

  // If the spaces are being cleared (only done at heap initialization
  // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces,
  // so check.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
    "Initialization of the survivor spaces assumes these are empty");

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, alignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding; if so, adjust eden size back up.
    minimum_eden_size = align_size_up(minimum_eden_size, alignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_size_down(maximum_survivor_size, alignment);
    survivor_size = MAX2(unaligned_survivor_size, alignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

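  // Carve the committed region into three contiguous spaces:
  //
  //   low                                            high
  //   [        eden        |   from    |    to     ]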
  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start, (HeapWord*)to_end);

  // A minimum eden size implies that there is a part of eden that
  // is being used and that affects the initialization of any
  // newly formed eden.
  bool live_in_eden = minimum_eden_size > 0;

  // If not clearing the spaces, do some checking to verify that
  // the spaces are already mangled.
  if (!clear_space) {
    // Must check mangling before the spaces are reshaped.  Otherwise,
    // the bottom or end of one space may have moved into another space,
    // and a failure of the check may not correctly indicate which space
    // is not properly mangled.
    if (ZapUnusedHeapArea) {
      HeapWord* limit = (HeapWord*) _virtual_space.high();
      eden()->check_mangled_unused_area(limit);
      from()->check_mangled_unused_area(limit);
        to()->check_mangled_unused_area(limit);
    }
  }

  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
  // If clear_space and live_in_eden, we will not have cleared any
  // portion of eden above its top. This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, clear_space, mangle_space);
  to()->initialize(toMR, clear_space, mangle_space);

  // Set next compaction spaces.
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space        = to();
  _to_space          = s;
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}

bool DefNewGeneration::expand(size_t bytes) {
  MutexLocker x(ExpandHeap_lock);
  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
  bool success = _virtual_space.expand_by(bytes);
  if (success && ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    HeapWord* new_high = (HeapWord*) _virtual_space.high();
    MemRegion mangle_region(prev_high, new_high);
    SpaceMangler::mangle_region(mangle_region);
  }

  // Do not attempt an expand-to-the-reserve size.  The
  // request should properly observe the maximum size of
  // the generation, so an expand-to-reserve should be
  // unnecessary.  Also, a second expand-to-reserve call
  // could potentially cause an undue expansion, for
  // example if the first expand fails for unknown reasons
  // but the second succeeds and expands the heap to its
  // maximum value.
  if (GC_locker::is_active()) {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Garbage collection disabled, "
        "expanded heap instead");
    }
  }

  return success;
}


void DefNewGeneration::compute_new_size() {
  // This is called after a gc that includes the following generation
  // (which is required to exist).  So from-space will normally be empty.
  // Note that we check both spaces, since if scavenge failed they revert roles.
  // If either is non-empty we bail out (otherwise we would have to
  // relocate the objects).
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  int next_level = level() + 1;
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(next_level < gch->_n_gens,
         "DefNewGeneration cannot be an oldest gen");

  Generation* next_gen = gch->_gens[next_level];
  size_t old_size = next_gen->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = spec()->init_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  // Compute desired new generation size based on NewRatio and
  // NewSizeThreadIncrease
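  //   desired_new_size =
  //     align_up(old_size / NewRatio + threads * NewSizeThreadIncrease)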
  size_t desired_new_size = old_size/NewRatio;
  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
  desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);

  // Adjust new generation size
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
       changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false.  If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // bail out of shrinking if there are objects in eden
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    Universe::heap()->barrier_set()->resize_covered_region(cmr);
    if (Verbose && PrintGC) {
      size_t new_size_after  = _virtual_space.committed_size();
      size_t eden_size_after = eden()->capacity();
      size_t survivor_size_after = from()->capacity();
      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
        SIZE_FORMAT "K [eden="
        SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, new_size_after/K,
        eden_size_after/K, survivor_size_after/K);
      if (WizardMode) {
        gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
          thread_increase_size/K, threads_count);
      }
      gclog_or_tty->cr();
    }
  }
}

void DefNewGeneration::object_iterate_since_last_GC(ObjectClosure* cl) {
  // $$$ This may be wrong in case of "scavenge failure"?
  eden()->object_iterate(cl);
}

void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
  assert(false, "NYI -- are you sure you want to call this?");
}


size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}


size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used();      // to() is only used during scavenge
}


size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}

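// The maximum capacity excludes one survivor space, which is always kept
// empty to serve as the scavenge copy target.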
size_t DefNewGeneration::max_capacity() const {
  const size_t alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
  const size_t reserved_bytes = reserved().byte_size();
  return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
}

size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}

size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}

size_t DefNewGeneration::contiguous_available() const {
  return eden()->free();
}


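// These expose eden's top and end addresses so that callers (e.g. the
// heap's inline contiguous allocation support) can allocate in eden
// directly.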
HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }

void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}


void DefNewGeneration::space_iterate(SpaceClosure* blk,
                                     bool usedOnly) {
  blk->do_space(eden());
  blk->do_space(from());
  blk->do_space(to());
}

// The last collection bailed out, we are running out of heap space,
// so we try to allocate from the from-space, too.
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
  HeapWord* result = NULL;
  if (PrintGC && Verbose) {
    gclog_or_tty->print("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "):"
                  "  will_fail: %s"
                  "  heap_lock: %s"
                  "  free: " SIZE_FORMAT,
                  size,
                  GenCollectedHeap::heap()->incremental_collection_will_fail() ? "true" : "false",
                  Heap_lock->is_locked() ? "locked" : "unlocked",
                  from()->free());
  }
  if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) {
    if (Heap_lock->owned_by_self() ||
        (SafepointSynchronize::is_at_safepoint() &&
         Thread::current()->is_VM_thread())) {
      // If the Heap_lock is not locked by this thread, this will be called
      // again later with the Heap_lock held.
      result = from()->allocate(size);
    } else if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  Heap_lock is not owned by self");
    }
  } else if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("  should_allocate_from_space: NOT");
  }
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("  returns %s", result == NULL ? "NULL" : "object");
  }
  return result;
}

HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
                                                bool   is_tlab,
                                                bool   parallel) {
  // We don't attempt to expand the young generation (but perhaps we should.)
  return allocate(size, is_tlab);
}



void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  _next_gen = gch->next_gen(this);
  assert(_next_gen != NULL,
    "This must be the youngest gen, and not the only gen");

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_will_fail();
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  gch->gen_process_strong_roots(_level,
                                true,  // Process younger gens, if any,
                                       // as strong roots.
                                true,  // activate StrongRootsScope
                                false, // not collecting perm generation.
                                SharedHeap::SO_AllClasses,
                                &fsc_with_no_gc_barrier,
                                true,   // walk *all* scavengable nmethods
                                &fsc_with_gc_barrier);

  // "evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                    NULL);
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a minor collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    // Set the desired survivor size to half the real survivor space
    _tenuring_threshold =
      age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);

    // A successful scavenge should restart the GC time limit count which is
    // for full GCs.
    AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
    size_policy->reset_gc_overhead_limit_count();
    if (PrintGC && !PrintGCDetails) {
      gch->print_heap_change(gch_prev_used);
    }
  } else {
    assert(HandlePromotionFailure,
      "Should not be here unless promotion failure handling is on");
    assert(_promo_failure_scan_stack != NULL &&
      _promo_failure_scan_stack->length() == 0, "post condition");

    // Deallocate the stack and its elements.
    delete _promo_failure_scan_stack;
    _promo_failure_scan_stack = NULL;

    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed) ");
    }
    // Add to-space to the list of spaces to compact
    // when a promotion failure has occurred.  In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For the sake of uniformity wrt ParNewGeneration::collect().
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_will_fail();

    // Inform the next generation that a promotion failure occurred.
    _next_gen->promotion_failure_occurred();

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());
  SpecializationStats::print();
  update_time_of_last_gc(os::javaTimeMillis());
}

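// Resets an object's mark word to the default value.  Used after a
// promotion failure, when mark words have been overwritten with
// forwarding pointers (possibly to the object itself); marks that must
// survive were saved separately and are reinstated by
// remove_forwarding_pointers() below.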
class RemoveForwardPointerClosure: public ObjectClosure {
public:
  void do_object(oop obj) {
    obj->init_mark();
  }
};

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::remove_forwarding_pointers() {
  RemoveForwardPointerClosure rspc;
  eden()->object_iterate(&rspc);
  from()->object_iterate(&rspc);
  // Now restore saved marks, if any.
  if (_objs_with_preserved_marks != NULL) {
    assert(_preserved_marks_of_objs != NULL, "Both or none.");
    assert(_objs_with_preserved_marks->length() ==
           _preserved_marks_of_objs->length(), "Both or none.");
    for (int i = 0; i < _objs_with_preserved_marks->length(); i++) {
      oop obj   = _objs_with_preserved_marks->at(i);
      markOop m = _preserved_marks_of_objs->at(i);
      obj->set_mark(m);
    }
    delete _objs_with_preserved_marks;
    delete _preserved_marks_of_objs;
    _objs_with_preserved_marks = NULL;
    _preserved_marks_of_objs = NULL;
  }
}

void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    if (_objs_with_preserved_marks == NULL) {
      assert(_preserved_marks_of_objs == NULL, "Both or none.");
      _objs_with_preserved_marks = new (ResourceObj::C_HEAP)
        GrowableArray<oop>(PreserveMarkStackSize, true);
      _preserved_marks_of_objs = new (ResourceObj::C_HEAP)
        GrowableArray<markOop>(PreserveMarkStackSize, true);
    }
    _objs_with_preserved_marks->push(obj);
    _preserved_marks_of_objs->push(m);
  }
}

void DefNewGeneration::handle_promotion_failure(oop old) {
  preserve_mark_if_necessary(old, old->mark());
  if (!_promotion_failed && PrintPromotionFailure) {
    gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ",
                        old->size());
  }

  // forward to self
  old->forward_to(old);
  _promotion_failed = true;

  push_on_promo_failure_scan_stack(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}

oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _next_gen->promote(old, s);
    if (obj == NULL) {
      if (!HandlePromotionFailure) {
        // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
        // is incorrectly set. In any case, it's seriously wrong to be here!
        vm_exit_out_of_memory(s*wordSize, "promotion");
      }

      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in the old object's header.
  old->forward_to(obj);

  return obj;
}

void DefNewGeneration::push_on_promo_failure_scan_stack(oop obj) {
  if (_promo_failure_scan_stack == NULL) {
    _promo_failure_scan_stack = new (ResourceObj::C_HEAP)
                                    GrowableArray<oop>(40, true);
  }

  _promo_failure_scan_stack->push(obj);
}

void DefNewGeneration::drain_promo_failure_scan_stack() {
  assert(_promo_failure_scan_stack != NULL, "precondition");

  while (_promo_failure_scan_stack->length() > 0) {
     oop obj = _promo_failure_scan_stack->pop();
     obj->oop_iterate(_promo_failure_scan_stack_closure);
  }
}

void DefNewGeneration::save_marks() {
  eden()->set_saved_mark();
  to()->set_saved_mark();
  from()->set_saved_mark();
}


void DefNewGeneration::reset_saved_marks() {
  eden()->reset_saved_mark();
  to()->reset_saved_mark();
  from()->reset_saved_mark();
}


bool DefNewGeneration::no_allocs_since_save_marks() {
  assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
  assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
  return to()->saved_mark_at_top();
}

#define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
                                                                \
void DefNewGeneration::                                         \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
  cl->set_generation(this);                                     \
  eden()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  to()->oop_since_save_marks_iterate##nv_suffix(cl);            \
  from()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  cl->reset_generation();                                       \
  save_marks();                                                 \
}

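// Instantiate the definition above once for each specialized closure type,
// generating the full family of oop_since_save_marks_iterate methods.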
ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)

#undef DefNew_SINCE_SAVE_MARKS_DEFN

void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                          size_t max_alloc_words) {
  if (requestor == this || _promotion_failed) return;
  assert(requestor->level() > level(), "DefNewGeneration must be youngest");

  /* $$$ Assert this?  "trace" is a "MarkSweep" function so that's not appropriate.
  if (to_space->top() > to_space->bottom()) {
    trace("to_space not empty when contribute_scratch called");
  }
  */

  ContiguousSpace* to_space = to();
  assert(to_space->end() >= to_space->top(), "pointers out of order");
  size_t free_words = pointer_delta(to_space->end(), to_space->top());
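  // If the unused tail of to-space is large enough, hand it out as a
  // single scratch block linked onto the head of the requestor's list.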
  if (free_words >= MinFreeScratchWords) {
    ScratchBlock* sb = (ScratchBlock*)to_space->top();
    sb->num_words = free_words;
    sb->next = list;
    list = sb;
  }
}

void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of
  // to_space if ZapUnusedHeapArea.  This is needed because
  // top is not maintained while using to-space as scratch.
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area_complete();
  }
}

bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    return false;
  }
  if (_next_gen == NULL) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    _next_gen = gch->next_gen(this);
    assert(_next_gen != NULL,
           "This must be the youngest gen, and not the only gen");
  }

  // Decide if there's enough room for a full promotion
  // When using extremely large edens, we effectively lose a
  // large amount of old space.  Use the "MaxLiveObjectEvacuationRatio"
  // flag to reduce the minimum evacuation space requirements. If
  // there is not enough space to evacuate eden during a scavenge,
  // the VM will immediately exit with an out of memory error.
  // This flag has not been tested
  // with collectors other than simple mark & sweep.
  //
  // Note that with the addition of promotion failure handling, the
  // VM will not immediately exit but will undo the young generation
  // collection.  The parameter is left here for compatibility.
  const double evacuation_ratio = MaxLiveObjectEvacuationRatio / 100.0;

  // worst_case_evacuation is based on "used()".  For the case where this
  // method is called after a collection, this is still appropriate because
  // the case that needs to be detected is one in which a full collection
  // has been done and has overflowed into the young generation.  In that
  // case a minor collection will fail (the overflow of the full collection
  // means there is no space in the old generation for any promotion).
  size_t worst_case_evacuation = (size_t)(used() * evacuation_ratio);

  return _next_gen->promotion_attempt_is_safe(worst_case_evacuation,
                                              HandlePromotionFailure);
}

void DefNewGeneration::gc_epilogue(bool full) {
  // Check if the heap is approaching full after a collection has
  // been done.  Generally the young generation is empty at
  // a minimum at the end of a collection.  If it is not, then
  // the heap is approaching full.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  clear_should_allocate_from_space();
  if (collection_attempt_is_safe()) {
    gch->clear_incremental_collection_will_fail();
  } else {
    gch->set_incremental_collection_will_fail();
    if (full) { // we seem to be running out of space
      set_should_allocate_from_space();
    }
  }

  if (ZapUnusedHeapArea) {
    eden()->check_mangled_unused_area_complete();
    from()->check_mangled_unused_area_complete();
    to()->check_mangled_unused_area_complete();
  }

  // update the generation and space performance counters
  update_counters();
  gch->collector_policy()->counters()->update_counters();
}

void DefNewGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  eden()->set_top_for_allocations();
  to()->set_top_for_allocations();
  from()->set_top_for_allocations();
}


void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}

void DefNewGeneration::verify(bool allow_dirty) {
  eden()->verify(allow_dirty);
  from()->verify(allow_dirty);
    to()->verify(allow_dirty);
}

void DefNewGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print("  eden");
  eden()->print_on(st);
  st->print("  from");
  from()->print_on(st);
  st->print("  to  ");
  to()->print_on(st);
}


const char* DefNewGeneration::name() const {
  return "def new generation";
}

// Moved from inline file as they are not called inline
CompactibleSpace* DefNewGeneration::first_compaction_space() const {
  return eden();
}

HeapWord* DefNewGeneration::allocate(size_t word_size,
                                     bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden.  If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result != NULL) {
    return result;
  }
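  // Eden is full up to its current soft end.  Ask the next generation
  // whether the soft limit may be advanced (only a soft-ended eden, i.e.
  // a ConcEdenSpace, can have soft_end() < end()), install the new limit
  // with a CAS, and retry the allocation.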
  do {
    HeapWord* old_limit = eden()->soft_end();
    if (old_limit < eden()->end()) {
      // Tell the next generation we reached a limit.
      HeapWord* new_limit =
        next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
      if (new_limit != NULL) {
        Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
      } else {
        assert(eden()->soft_end() == eden()->end(),
               "invalid state after allocation_limit_reached returned null");
      }
    } else {
      // The allocation failed and the soft limit is equal to the hard limit;
      // there is no reason to retry the allocation.
      assert(old_limit == eden()->end(), "sanity check");
      break;
    }
    // Try to allocate until it succeeds or the soft limit can't be adjusted.
    result = eden()->par_allocate(word_size);
  } while (result == NULL);

  // If the eden is full and the last collection bailed out, we are running
  // out of heap space, and we try to allocate from the from-space, too.
  // allocate_from_space can't be inlined because that would introduce a
  // circular dependency at compile time.
  if (result == NULL) {
    result = allocate_from_space(word_size);
  }
  return result;
}

HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                         bool is_tlab) {
  return eden()->par_allocate(word_size);
}

void DefNewGeneration::gc_prologue(bool full) {
  // Ensure that _end and _soft_end are the same in eden space.
  eden()->set_soft_end(eden()->end());
}

size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}