/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/defNewGeneration.inline.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generationSpec.hpp"
#include "memory/iterator.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/space.inline.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/copy.hpp"
#include "utilities/stack.inline.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif

//
// DefNewGeneration functions.

// Methods of protected closure types.

DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
  assert(g->level() == 0, "Optimized for youngest gen.");
}
void DefNewGeneration::IsAliveClosure::do_object(oop p) {
  assert(false, "Do not call.");
}
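
// An object in this (youngest) generation is considered live by the
// reference processor if it lies outside the generation's reserved
// range or has already been forwarded (copied) by the current scavenge.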
bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
  return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
}

DefNewGeneration::KeepAliveClosure::
KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
  GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
  assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind.");
  _rs = (CardTableRS*)rs;
}

void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }


DefNewGeneration::FastKeepAliveClosure::
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {
  _boundary = g->reserved().end();
}

void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }

DefNewGeneration::EvacuateFollowersClosure::
EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                         ScanClosure* cur, ScanClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

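// Transitively evacuate everything reachable from the already-copied
// objects: rescan all objects allocated since the last save_marks()
// until a complete pass copies nothing new.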
void DefNewGeneration::EvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}

DefNewGeneration::FastEvacuateFollowersClosure::
FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                             DefNewGeneration* gen,
                             FastScanClosure* cur, FastScanClosure* older) :
  _gch(gch), _level(level), _gen(gen),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
  guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
}

ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
  OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }

FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
  OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }

ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
  OopClosure(g->ref_processor()), _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }

void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }

DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   int level,
                                   const char* policy)
  : Generation(rs, initial_size, level),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
    _eden_space = new ConcEdenSpace(this);
  } else {
    _eden_space = new EdenSpace(this);
  }
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
    vm_exit_during_initialization("Could not allocate a new gen space");

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, alignment);
  _max_eden_size = size - (2*_max_survivor_size);
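  // Worked example (hypothetical numbers, assuming compute_survivor_size()
  // yields roughly reserved_size / (SurvivorRatio + 2)): with a 64M
  // reservation and SurvivorRatio=8, each survivor space is about 6.4M
  // and eden receives the remaining ~51M.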

  // allocate the performance counters

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _next_gen = NULL;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
}

void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  uintx alignment =
    GenCollectedHeap::heap()->collector_policy()->min_alignment();

  // If the spaces are being cleared (only done at heap initialization
  // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces
  // so check.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
    "Initialization of the survivor spaces assumes these are empty");

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, alignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding; if so, adjust eden size back up.
    minimum_eden_size = align_size_up(minimum_eden_size, alignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_size_down(maximum_survivor_size, alignment);
    survivor_size = MAX2(unaligned_survivor_size, alignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

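  // Carve the committed region into three contiguous spaces:
  //
  //   +--------------------+-----------+-----------+
  //   |        eden        |   from    |    to     |
  //   +--------------------+-----------+-----------+
  //   eden_start           from_start  to_start    to_end == high()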
  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start, (HeapWord*)to_end);

  // A minimum eden size implies that there is a part of eden that
  // is being used and that affects the initialization of any
  // newly formed eden.
  bool live_in_eden = minimum_eden_size > 0;

  // If not clearing the spaces, do some checking to verify that
  // the spaces are already mangled.
  if (!clear_space) {
    // Must check mangling before the spaces are reshaped.  Otherwise,
    // the bottom or end of one space may have moved into another, and
    // a failure of the check may not correctly indicate which space
    // is not properly mangled.
    if (ZapUnusedHeapArea) {
      HeapWord* limit = (HeapWord*) _virtual_space.high();
      eden()->check_mangled_unused_area(limit);
      from()->check_mangled_unused_area(limit);
      to()->check_mangled_unused_area(limit);
    }
  }

  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
  // If clear_space and live_in_eden, we will not have cleared any
  // portion of eden above its top. This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, clear_space, mangle_space);
  to()->initialize(toMR, clear_space, mangle_space);

  // Set next compaction spaces.
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space        = to();
  _to_space          = s;
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}

bool DefNewGeneration::expand(size_t bytes) {
  MutexLocker x(ExpandHeap_lock);
  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
  bool success = _virtual_space.expand_by(bytes);
  if (success && ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    HeapWord* new_high = (HeapWord*) _virtual_space.high();
    MemRegion mangle_region(prev_high, new_high);
    SpaceMangler::mangle_region(mangle_region);
  }

  // Do not attempt an expand-to-the-reserve size.  The
  // request should properly observe the maximum size of
  // the generation, so an expand-to-reserve should be
  // unnecessary.  Also, a second expand-to-reserve call
  // could potentially cause an undue expansion, for
  // example if the first expand fails for unknown reasons
  // but the second succeeds and expands the heap to its
  // maximum value.
  if (GC_locker::is_active()) {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Garbage collection disabled, "
        "expanded heap instead");
    }
  }

  return success;
}


void DefNewGeneration::compute_new_size() {
  // This is called after a gc that includes the following generation
  // (which is required to exist).  So from-space will normally be empty.
  // Note that we check both spaces, since if scavenge failed they revert roles.
  // If not empty, we bail out (otherwise we would have to relocate the objects).
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  int next_level = level() + 1;
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(next_level < gch->_n_gens,
         "DefNewGeneration cannot be an oldest gen");

  Generation* next_gen = gch->_gens[next_level];
  size_t old_size = next_gen->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = spec()->init_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  // Compute desired new generation size based on NewRatio and
  // NewSizeThreadIncrease
  size_t desired_new_size = old_size/NewRatio;
  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
  desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);
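  // Worked example (hypothetical numbers): with a 300M old generation,
  // NewRatio=3, NewSizeThreadIncrease=16K and 10 non-daemon threads,
  // desired_new_size = align_size_up(100M + 160K, alignment).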

  // Adjust new generation size
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
       changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false.  If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // bail out of shrinking if objects in eden
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    Universe::heap()->barrier_set()->resize_covered_region(cmr);
    if (Verbose && PrintGC) {
      size_t new_size_after  = _virtual_space.committed_size();
      size_t eden_size_after = eden()->capacity();
      size_t survivor_size_after = from()->capacity();
      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
        SIZE_FORMAT "K [eden="
        SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, new_size_after/K,
        eden_size_after/K, survivor_size_after/K);
      if (WizardMode) {
        gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
          thread_increase_size/K, threads_count);
      }
      gclog_or_tty->cr();
    }
  }
}

void DefNewGeneration::object_iterate_since_last_GC(ObjectClosure* cl) {
  // $$$ This may be wrong in case of "scavenge failure"?
  eden()->object_iterate(cl);
}

void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
  assert(false, "NYI -- are you sure you want to call this?");
}


size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}


size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used();      // to() is only used during scavenge
}


size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}

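// The maximum capacity excludes one survivor space: to() is kept empty
// as the copy reserve for scavenges, so at most eden plus one survivor
// can ever hold objects.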
size_t DefNewGeneration::max_capacity() const {
  const size_t alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
  const size_t reserved_bytes = reserved().byte_size();
  return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
}

size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}

size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}

size_t DefNewGeneration::contiguous_available() const {
  return eden()->free();
}


HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }

void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}


void DefNewGeneration::space_iterate(SpaceClosure* blk,
                                     bool usedOnly) {
  blk->do_space(eden());
  blk->do_space(from());
  blk->do_space(to());
}

// The last collection bailed out, we are running out of heap space,
// so we try to allocate the from-space, too.
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
  HeapWord* result = NULL;
  if (PrintGC && Verbose) {
    // Use SIZE_FORMAT for the size_t argument (the original "%u" is wrong
    // on LP64 platforms).
    gclog_or_tty->print("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "):"
                  "  will_fail: %s"
                  "  heap_lock: %s"
                  "  free: " SIZE_FORMAT,
                  size,
                  GenCollectedHeap::heap()->incremental_collection_will_fail() ? "true" : "false",
                  Heap_lock->is_locked() ? "locked" : "unlocked",
                  from()->free());
  }
  if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) {
    if (Heap_lock->owned_by_self() ||
        (SafepointSynchronize::is_at_safepoint() &&
         Thread::current()->is_VM_thread())) {
      // If the Heap_lock is not locked by this thread, this will be called
      // again later with the Heap_lock held.
      result = from()->allocate(size);
    } else if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  Heap_lock is not owned by self");
    }
  } else if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("  should_allocate_from_space: NOT");
  }
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("  returns %s", result == NULL ? "NULL" : "object");
  }
  return result;
}

HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
                                                bool   is_tlab,
                                                bool   parallel) {
  // We don't attempt to expand the young generation (but perhaps we should.)
  return allocate(size, is_tlab);
}


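// A DefNew collection is a classic two-space copying scavenge: scan the
// roots (including dirty cards in older generations), copy reachable
// objects into to-space or promote them into the next generation,
// transitively evacuate their followers, process discovered references,
// and finally swap the survivor spaces.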
void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  _next_gen = gch->next_gen(this);
  assert(_next_gen != NULL,
    "This must be the youngest gen, and not the only gen");

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_will_fail();
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  gch->gen_process_strong_roots(_level,
                                true,  // Process younger gens, if any,
                                       // as strong roots.
                                true,  // activate StrongRootsScope
                                false, // not collecting perm generation.
                                SharedHeap::SO_AllClasses,
                                &fsc_with_no_gc_barrier,
                                true,   // walk *all* scavengable nmethods
                                &fsc_with_gc_barrier);

  // "evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                    NULL);
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a minor collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    // Set the desired survivor size to half the real survivor space
    _tenuring_threshold =
      age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);

    // A successful scavenge should restart the GC time limit count, which is
    // for full GCs.
    AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
    size_policy->reset_gc_overhead_limit_count();
    if (PrintGC && !PrintGCDetails) {
      gch->print_heap_change(gch_prev_used);
    }
  } else {
    assert(HandlePromotionFailure,
      "Should not be here unless promotion failure handling is on");
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed) ");
    }
    // Add to-space to the list of spaces to compact
    // when a promotion failure has occurred.  In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_will_fail();

    // Inform the next generation that a promotion failure occurred.
    _next_gen->promotion_failure_occurred();

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());
  SpecializationStats::print();
  update_time_of_last_gc(os::javaTimeMillis());
}

class RemoveForwardPointerClosure: public ObjectClosure {
public:
  void do_object(oop obj) {
    obj->init_mark();
  }
};

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::remove_forwarding_pointers() {
  RemoveForwardPointerClosure rspc;
  eden()->object_iterate(&rspc);
  from()->object_iterate(&rspc);

  // Now restore saved marks, if any.
  assert(_objs_with_preserved_marks.size() == _preserved_marks_of_objs.size(),
         "should be the same");
  while (!_objs_with_preserved_marks.is_empty()) {
    oop obj   = _objs_with_preserved_marks.pop();
    markOop m = _preserved_marks_of_objs.pop();
    obj->set_mark(m);
  }
  _objs_with_preserved_marks.clear(true);
  _preserved_marks_of_objs.clear(true);
}

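// Forwarding an object overwrites its mark word, so a mark that carries
// non-trivial state (for example lock or hash bits) must be saved before
// the object is self-forwarded, and is restored afterwards by
// remove_forwarding_pointers().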
void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    _objs_with_preserved_marks.push(obj);
    _preserved_marks_of_objs.push(m);
  }
}

void DefNewGeneration::handle_promotion_failure(oop old) {
  preserve_mark_if_necessary(old, old->mark());
  if (!_promotion_failed && PrintPromotionFailure) {
    gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ",
                        old->size());
  }

  // forward to self
  old->forward_to(old);
  _promotion_failed = true;

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}

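// Copy a live object into to-space if it is young enough, otherwise
// promote it into the next generation.  If promotion fails, the object
// is self-forwarded and handled by the promotion-failure machinery.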
oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _next_gen->promote(old, s);
    if (obj == NULL) {
      if (!HandlePromotionFailure) {
        // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
        // is incorrectly set. In any case, it's seriously wrong to be here!
        vm_exit_out_of_memory(s*wordSize, "promotion");
      }

      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  return obj;
}

void DefNewGeneration::drain_promo_failure_scan_stack() {
  while (!_promo_failure_scan_stack.is_empty()) {
    oop obj = _promo_failure_scan_stack.pop();
    obj->oop_iterate(_promo_failure_scan_stack_closure);
  }
}

void DefNewGeneration::save_marks() {
  eden()->set_saved_mark();
  to()->set_saved_mark();
  from()->set_saved_mark();
}


void DefNewGeneration::reset_saved_marks() {
  eden()->reset_saved_mark();
  to()->reset_saved_mark();
  from()->reset_saved_mark();
}


bool DefNewGeneration::no_allocs_since_save_marks() {
  assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
  assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
  return to()->saved_mark_at_top();
}

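// Generate an oop_since_save_marks_iterate variant for each closure
// type named by ALL_SINCE_SAVE_MARKS_CLOSURES (the nv_suffix parameter
// distinguishes the virtual and non-virtual flavors).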
#define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
                                                                \
void DefNewGeneration::                                         \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
  cl->set_generation(this);                                     \
  eden()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  to()->oop_since_save_marks_iterate##nv_suffix(cl);            \
  from()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  cl->reset_generation();                                       \
  save_marks();                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)

#undef DefNew_SINCE_SAVE_MARKS_DEFN

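// Offer the unused tail of to-space to an older generation as scratch
// memory, unless a promotion failure has left live objects there.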
void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                          size_t max_alloc_words) {
  if (requestor == this || _promotion_failed) return;
  assert(requestor->level() > level(), "DefNewGeneration must be youngest");

  /* $$$ Assert this?  "trace" is a "MarkSweep" function so that's not appropriate.
  if (to_space->top() > to_space->bottom()) {
    trace("to_space not empty when contribute_scratch called");
  }
  */

  ContiguousSpace* to_space = to();
  assert(to_space->end() >= to_space->top(), "pointers out of order");
  size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    ScratchBlock* sb = (ScratchBlock*)to_space->top();
    sb->num_words = free_words;
    sb->next = list;
    list = sb;
  }
}

void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of
  // to_space if ZapUnusedHeapArea.  This is needed because
  // top is not maintained while using to-space as scratch.
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area_complete();
  }
}

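// A scavenge is considered safe only if to-space is empty and the next
// generation judges that it can absorb the worst-case promotion of the
// objects currently live in this generation.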
bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    return false;
  }
  if (_next_gen == NULL) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    _next_gen = gch->next_gen(this);
    assert(_next_gen != NULL,
           "This must be the youngest gen, and not the only gen");
  }

  // Decide if there's enough room for a full promotion.
  // When using extremely large edens, we effectively lose a
  // large amount of old space.  Use the "MaxLiveObjectEvacuationRatio"
  // flag to reduce the minimum evacuation space requirements. If
  // there is not enough space to evacuate eden during a scavenge,
  // the VM will immediately exit with an out of memory error.
  // This flag has not been tested
  // with collectors other than simple mark & sweep.
  //
  // Note that with the addition of promotion failure handling, the
  // VM will not immediately exit but will undo the young generation
  // collection.  The parameter is left here for compatibility.
  const double evacuation_ratio = MaxLiveObjectEvacuationRatio / 100.0;

  // worst_case_evacuation is based on "used()".  For the case where this
  // method is called after a collection, this is still appropriate because
  // the case that needs to be detected is one in which a full collection
  // has been done and has overflowed into the young generation.  In that
  // case a minor collection will fail (the overflow of the full collection
  // means there is no space in the old generation for any promotion).
  size_t worst_case_evacuation = (size_t)(used() * evacuation_ratio);

  return _next_gen->promotion_attempt_is_safe(worst_case_evacuation,
                                              HandlePromotionFailure);
}

void DefNewGeneration::gc_epilogue(bool full) {
  // Check whether the heap is approaching full after a collection has
  // been done.  Generally the young generation is empty at the end of
  // a collection; if it is not, then the heap is approaching full.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  clear_should_allocate_from_space();
  if (collection_attempt_is_safe()) {
    gch->clear_incremental_collection_will_fail();
  } else {
    gch->set_incremental_collection_will_fail();
    if (full) { // we seem to be running out of space
      set_should_allocate_from_space();
    }
  }

  if (ZapUnusedHeapArea) {
    eden()->check_mangled_unused_area_complete();
    from()->check_mangled_unused_area_complete();
    to()->check_mangled_unused_area_complete();
  }

  // update the generation and space performance counters
  update_counters();
  gch->collector_policy()->counters()->update_counters();
}

void DefNewGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  eden()->set_top_for_allocations();
  to()->set_top_for_allocations();
  from()->set_top_for_allocations();
}


void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}

void DefNewGeneration::verify(bool allow_dirty) {
  eden()->verify(allow_dirty);
  from()->verify(allow_dirty);
  to()->verify(allow_dirty);
}

void DefNewGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print("  eden");
  eden()->print_on(st);
  st->print("  from");
  from()->print_on(st);
  st->print("  to  ");
  to()->print_on(st);
}


const char* DefNewGeneration::name() const {
  return "def new generation";
}

// Moved from inline file as they are not called inline
CompactibleSpace* DefNewGeneration::first_compaction_space() const {
  return eden();
}

HeapWord* DefNewGeneration::allocate(size_t word_size,
                                     bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden.  If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result != NULL) {
    return result;
  }
  do {
    HeapWord* old_limit = eden()->soft_end();
    if (old_limit < eden()->end()) {
      // Tell the next generation we reached a limit.
      HeapWord* new_limit =
        next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
      if (new_limit != NULL) {
        Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
      } else {
        assert(eden()->soft_end() == eden()->end(),
               "invalid state after allocation_limit_reached returned null");
      }
    } else {
      // The allocation failed and the soft limit is equal to the hard
      // limit, so there is no reason to attempt another allocation.
      assert(old_limit == eden()->end(), "sanity check");
      break;
    }
    // Retry the allocation until it succeeds or the soft limit can no
    // longer be adjusted.
    result = eden()->par_allocate(word_size);
  } while (result == NULL);

  // If the eden is full and the last collection bailed out, we are running
  // out of heap space, and we try to allocate the from-space, too.
  // allocate_from_space can't be inlined because that would introduce a
  // circular dependency at compile time.
  if (result == NULL) {
    result = allocate_from_space(word_size);
  }
  return result;
}

HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                         bool is_tlab) {
  return eden()->par_allocate(word_size);
}

void DefNewGeneration::gc_prologue(bool full) {
  // Ensure that _end and _soft_end are the same in eden space.
  eden()->set_soft_end(eden()->end());
}

size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}