/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/defNewGeneration.inline.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/genRemSet.hpp"
#include "memory/generationSpec.hpp"
#include "memory/iterator.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/space.inline.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/stack.inline.hpp"

//
// DefNewGeneration functions.

// Methods of protected closure types.

DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
  assert(g->level() == 0, "Optimized for youngest gen.");
}
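// An object is presumed live if it lies outside this (youngest) generation's
// reserved region, or if it has already been forwarded (i.e. copied) during
// the current scavenge.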
bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
  return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
}

DefNewGeneration::KeepAliveClosure::
KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
  GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
  assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind.");
  _rs = (CardTableRS*)rs;
}

void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }


DefNewGeneration::FastKeepAliveClosure::
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {
  _boundary = g->reserved().end();
}

void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }

DefNewGeneration::EvacuateFollowersClosure::
EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                         ScanClosure* cur, ScanClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

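// Transitively scan the objects copied since the last save_marks() call,
// repeating until a pass copies nothing further (i.e. there have been no
// allocations since the marks were saved).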
void DefNewGeneration::EvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}

DefNewGeneration::FastEvacuateFollowersClosure::
FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                             DefNewGeneration* gen,
                             FastScanClosure* cur, FastScanClosure* older) :
  _gch(gch), _level(level), _gen(gen),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
  guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
}

ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
    OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }

FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
    OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }

void KlassScanClosure::do_klass(Klass* klass) {
#ifndef PRODUCT
  if (TraceScavenge) {
    ResourceMark rm;
    gclog_or_tty->print_cr("KlassScanClosure::do_klass %p, %s, dirty: %s",
                           klass,
                           klass->external_name(),
                           klass->has_modified_oops() ? "true" : "false");
  }
#endif

  // If the klass has not been dirtied we know that there are
  // no references into the young gen and we can skip it.
  if (klass->has_modified_oops()) {
    if (_accumulate_modified_oops) {
      klass->accumulate_modified_oops();
    }

    // Clear this state since we're going to scavenge all the metadata.
    klass->clear_modified_oops();

    // Tell the closure which Klass is being scanned so that it can be dirtied
    // if oops are left pointing into the young gen.
    _scavenge_closure->set_scanned_klass(klass);

    klass->oops_do(_scavenge_closure);

    _scavenge_closure->set_scanned_klass(NULL);
  }
}

ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
  _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }

void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }

KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
                                   KlassRemSet* klass_rem_set)
    : _scavenge_closure(scavenge_closure),
      _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}


DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   int level,
                                   const char* policy)
  : Generation(rs, initial_size, level),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
    _eden_space = new ConcEdenSpace(this);
  } else {
    _eden_space = new EdenSpace(this);
  }
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
    vm_exit_during_initialization("Could not allocate a new gen space");

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, alignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _next_gen = NULL;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;

  _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
}

void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  uintx alignment =
    GenCollectedHeap::heap()->collector_policy()->space_alignment();

  // If the spaces are being cleared (currently done only at heap
  // initialization), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces,
  // so check that they are empty.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
    "Initialization of the survivor spaces assumes these are empty");

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, alignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding; if so, adjust eden size back up.
    minimum_eden_size = align_size_up(minimum_eden_size, alignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_size_down(maximum_survivor_size, alignment);
    survivor_size = MAX2(unaligned_survivor_size, alignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

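  // Lay the three spaces out contiguously in the committed region:
  // eden first, then from-space, then to-space.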
  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start, (HeapWord*)to_end);

  // A minimum eden size implies that there is a part of eden that
  // is being used and that affects the initialization of any
  // newly formed eden.
  bool live_in_eden = minimum_eden_size > 0;

  // If not clearing the spaces, do some checking to verify that
  // the spaces are already mangled.
  if (!clear_space) {
    // Must check mangling before the spaces are reshaped.  Otherwise,
    // the bottom or end of one space may have moved into another space,
    // and a failure of the check may not correctly indicate which space
    // is not properly mangled.
    if (ZapUnusedHeapArea) {
      HeapWord* limit = (HeapWord*) _virtual_space.high();
      eden()->check_mangled_unused_area(limit);
      from()->check_mangled_unused_area(limit);
      to()->check_mangled_unused_area(limit);
    }
  }

  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
  // If clear_space and live_in_eden, we will not have cleared any
  // portion of eden above its top. This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, clear_space, mangle_space);
  to()->initialize(toMR, clear_space, mangle_space);

  // Set next compaction spaces.
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);
}

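// Exchange the roles of from-space and to-space and, when perf data is
// enabled, their space counters as well.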
void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space        = to();
  _to_space          = s;
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}

bool DefNewGeneration::expand(size_t bytes) {
  MutexLocker x(ExpandHeap_lock);
  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
  bool success = _virtual_space.expand_by(bytes);
  if (success && ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    HeapWord* new_high = (HeapWord*) _virtual_space.high();
    MemRegion mangle_region(prev_high, new_high);
    SpaceMangler::mangle_region(mangle_region);
  }

  // Do not attempt an expand-to-the-reserve size.  The
  // request should properly observe the maximum size of
  // the generation, so an expand-to-reserve should be
  // unnecessary.  Also, a second expand-to-reserve call
  // can potentially cause an undue expansion, for example
  // if the first expand fails for unknown reasons but the
  // second succeeds and expands the heap to its maximum
  // value.
  if (GC_locker::is_active()) {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Garbage collection disabled, "
        "expanded heap instead");
    }
  }

  return success;
}


void DefNewGeneration::compute_new_size() {
  // This is called after a gc that includes the following generation
  // (which is required to exist).  So from-space will normally be empty.
  // Note that we check both spaces, since if the scavenge failed they swap roles.
  // If neither is empty we bail out (otherwise we would have to relocate the objects).
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  int next_level = level() + 1;
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(next_level < gch->_n_gens,
         "DefNewGeneration cannot be an oldest gen");

  Generation* next_gen = gch->_gens[next_level];
  size_t old_size = next_gen->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = spec()->init_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  // Compute desired new generation size based on NewRatio and
  // NewSizeThreadIncrease
  size_t desired_new_size = old_size/NewRatio;
  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
  desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);

  // Adjust new generation size
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
      changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false.  If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // bail out of shrinking if objects in eden
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    Universe::heap()->barrier_set()->resize_covered_region(cmr);
    if (Verbose && PrintGC) {
      size_t new_size_after  = _virtual_space.committed_size();
      size_t eden_size_after = eden()->capacity();
      size_t survivor_size_after = from()->capacity();
      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
        SIZE_FORMAT "K [eden="
        SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, new_size_after/K,
        eden_size_after/K, survivor_size_after/K);
      if (WizardMode) {
        gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
          thread_increase_size/K, threads_count);
      }
      gclog_or_tty->cr();
    }
  }
}

void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
  assert(false, "NYI -- are you sure you want to call this?");
}


size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}


size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used();      // to() is only used during scavenge
}


size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}

size_t DefNewGeneration::max_capacity() const {
  const size_t alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
  const size_t reserved_bytes = reserved().byte_size();
  return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
}

size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}

size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}

size_t DefNewGeneration::contiguous_available() const {
  return eden()->free();
}


HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }

void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}


void DefNewGeneration::space_iterate(SpaceClosure* blk,
                                     bool usedOnly) {
  blk->do_space(eden());
  blk->do_space(from());
  blk->do_space(to());
}

// The last collection bailed out; we are running out of heap space,
// so we try to allocate from the from-space, too.
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
  HeapWord* result = NULL;
  if (Verbose && PrintGCDetails) {
    gclog_or_tty->print("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "):"
                        "  will_fail: %s"
                        "  heap_lock: %s"
                        "  free: " SIZE_FORMAT,
                        size,
                        GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
                          "true" : "false",
                        Heap_lock->is_locked() ? "locked" : "unlocked",
                        from()->free());
  }
  if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) {
    if (Heap_lock->owned_by_self() ||
        (SafepointSynchronize::is_at_safepoint() &&
         Thread::current()->is_VM_thread())) {
      // If the Heap_lock is not locked by this thread, this will be called
      // again later with the Heap_lock held.
      result = from()->allocate(size);
    } else if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  Heap_lock is not owned by self");
    }
  } else if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("  should_allocate_from_space: NOT");
  }
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("  returns %s", result == NULL ? "NULL" : "object");
  }
  return result;
}

HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
                                                bool   is_tlab,
                                                bool   parallel) {
  // We don't attempt to expand the young generation (but perhaps we should).
  return allocate(size, is_tlab);
}

void DefNewGeneration::adjust_desired_tenuring_threshold() {
  // Set the desired survivor size to half the real survivor space
  _tenuring_threshold =
    age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
}

void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start(os::elapsed_counter());
  DefNewTracer gc_tracer;
  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());

  _next_gen = gch->next_gen(this);

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: Collection attempt not safe :: ");
    }
    gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  gch->trace_heap_before_gc(&gc_tracer);

  SpecializationStats::clear();

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

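  // Two variants of the scan closure are used: the "no gc barrier" closure
  // for roots outside the heap and for oops scanned within this generation,
  // and the "gc barrier" closure for oops found in older generations, which
  // additionally re-dirties the card when an updated oop still points into
  // the young generation.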
  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
                                      gch->rem_set()->klass_rem_set());

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;

  gch->gen_process_strong_roots(_level,
                                true,  // Process younger gens, if any,
                                       // as strong roots.
                                true,  // activate StrongRootsScope
                                true,  // is scavenging
                                SharedHeap::ScanningOption(so),
                                &fsc_with_no_gc_barrier,
                                true,   // walk *all* scavengable nmethods
                                &fsc_with_gc_barrier,
                                &klass_scan_closure);

  // "evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  const ReferenceProcessorStats& stats =
    rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                      NULL, _gc_timer);
  gc_tracer.report_gc_reference_stats(stats);

  if (!_promotion_failed) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a minor collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
    size_policy->reset_gc_overhead_limit_count();
    if (PrintGC && !PrintGCDetails) {
      gch->print_heap_change(gch_prev_used);
    }
    assert(!gch->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed) ");
    }
    // Add to-space to the list of space to compact
    // when a promotion failure has occurred.  In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_failed();

    // Inform the next generation that a promotion failure occurred.
    _next_gen->promotion_failure_occurred();
    gc_tracer.report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());
  SpecializationStats::print();

  // We need to use a monotonically non-decreasing time in ms, or we will
  // see time-warp warnings; os::javaTimeMillis() does not guarantee
  // monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  gch->trace_heap_after_gc(&gc_tracer);
  gc_tracer.report_tenuring_threshold(tenuring_threshold());

  _gc_timer->register_gc_end(os::elapsed_counter());

  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}

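// Used after a promotion failure: resets the header of each object in eden
// and from-space back to its default (unforwarded) mark word; any marks that
// had to be preserved are restored separately from the preserved-marks stacks.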
class RemoveForwardPointerClosure: public ObjectClosure {
public:
  void do_object(oop obj) {
    obj->init_mark();
  }
};

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  _promotion_failed_info.reset();
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::remove_forwarding_pointers() {
  RemoveForwardPointerClosure rspc;
  eden()->object_iterate(&rspc);
  from()->object_iterate(&rspc);

  // Now restore saved marks, if any.
  assert(_objs_with_preserved_marks.size() == _preserved_marks_of_objs.size(),
         "should be the same");
  while (!_objs_with_preserved_marks.is_empty()) {
    oop obj   = _objs_with_preserved_marks.pop();
    markOop m = _preserved_marks_of_objs.pop();
    obj->set_mark(m);
  }
  _objs_with_preserved_marks.clear(true);
  _preserved_marks_of_objs.clear(true);
}

void DefNewGeneration::preserve_mark(oop obj, markOop m) {
  assert(_promotion_failed && m->must_be_preserved_for_promotion_failure(obj),
         "Oversaving!");
  _objs_with_preserved_marks.push(obj);
  _preserved_marks_of_objs.push(m);
}

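// Only mark words that cannot be reconstructed later (for example because
// the object is locked or has an identity hash installed) need to be saved
// before the header is overwritten with a forwarding pointer; a default mark
// can simply be reinstated by init_mark() afterwards.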
void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    preserve_mark(obj, m);
  }
}

void DefNewGeneration::handle_promotion_failure(oop old) {
  if (PrintPromotionFailure && !_promotion_failed) {
    gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ",
                        old->size());
  }
  _promotion_failed = true;
  _promotion_failed_info.register_copy_failure(old->size());
  preserve_mark_if_necessary(old, old->mark());
  // forward to self
  old->forward_to(old);

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}

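// Copy an object that is known to be in this generation and not yet
// forwarded: first try to copy it into to-space (unless it has reached the
// tenuring threshold), then fall back to promoting it into the next
// generation; if both fail, the promotion-failure path forwards it to itself.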
oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _next_gen->promote(old, s);
    if (obj == NULL) {
      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  return obj;
}

void DefNewGeneration::drain_promo_failure_scan_stack() {
  while (!_promo_failure_scan_stack.is_empty()) {
     oop obj = _promo_failure_scan_stack.pop();
     obj->oop_iterate(_promo_failure_scan_stack_closure);
  }
}

void DefNewGeneration::save_marks() {
  eden()->set_saved_mark();
  to()->set_saved_mark();
  from()->set_saved_mark();
}


void DefNewGeneration::reset_saved_marks() {
  eden()->reset_saved_mark();
  to()->reset_saved_mark();
  from()->reset_saved_mark();
}


bool DefNewGeneration::no_allocs_since_save_marks() {
  assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
  assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
  return to()->saved_mark_at_top();
}

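// Generate one specialized oop_since_save_marks_iterate method per closure
// type listed in ALL_SINCE_SAVE_MARKS_CLOSURES.  Each method scans the oops
// of objects allocated (i.e. copied) since the last save_marks() call and
// then resets the saved marks to the current tops.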
#define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
                                                                \
void DefNewGeneration::                                         \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
  cl->set_generation(this);                                     \
  eden()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  to()->oop_since_save_marks_iterate##nv_suffix(cl);            \
  from()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  cl->reset_generation();                                       \
  save_marks();                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)

#undef DefNew_SINCE_SAVE_MARKS_DEFN

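// Offer the unused tail of to-space as scratch memory to an older
// generation (the requestor), provided the last scavenge did not suffer a
// promotion failure and the free tail is at least MinFreeScratchWords long.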
void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                         size_t max_alloc_words) {
  if (requestor == this || _promotion_failed) return;
  assert(requestor->level() > level(), "DefNewGeneration must be youngest");

  /* $$$ Assert this?  "trace" is a "MarkSweep" function so that's not appropriate.
  if (to_space->top() > to_space->bottom()) {
    trace("to_space not empty when contribute_scratch called");
  }
  */

  ContiguousSpace* to_space = to();
  assert(to_space->end() >= to_space->top(), "pointers out of order");
  size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    ScratchBlock* sb = (ScratchBlock*)to_space->top();
    sb->num_words = free_words;
    sb->next = list;
    list = sb;
  }
}

void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of
  // to_space if ZapUnusedHeapArea.  This is needed because
  // top is not maintained while using to-space as scratch.
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area_complete();
  }
}

bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: to is not empty :: ");
    }
    return false;
  }
  if (_next_gen == NULL) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    _next_gen = gch->next_gen(this);
  }
  return _next_gen->promotion_attempt_is_safe(used());
}

void DefNewGeneration::gc_epilogue(bool full) {
  DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)

  assert(!GC_locker::is_active(), "We should not be executing here");
  // Check whether the heap is approaching full after a collection has
  // been done.  Generally the young generation is empty, at a minimum,
  // at the end of a collection.  If it is not, then the heap is
  // approaching full.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (full) {
    DEBUG_ONLY(seen_incremental_collection_failed = false;)
    if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      }
      gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
      set_should_allocate_from_space(); // we seem to be running out of space
    } else {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      }
      gch->clear_incremental_collection_failed(); // We just did a full collection
      clear_should_allocate_from_space(); // if set
    }
  } else {
#ifdef ASSERT
    // It is possible that incremental_collection_failed() == true
    // here, because an attempted scavenge did not succeed. The policy
    // is normally expected to cause a full collection which should
    // clear that condition, so we should not be here twice in a row
    // with incremental_collection_failed() == true without having done
    // a full collection in between.
    if (!seen_incremental_collection_failed &&
        gch->incremental_collection_failed()) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
                            GCCause::to_string(gch->gc_cause()));
      }
      seen_incremental_collection_failed = true;
    } else if (seen_incremental_collection_failed) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
                            GCCause::to_string(gch->gc_cause()));
      }
      assert(gch->gc_cause() == GCCause::_scavenge_alot ||
             (gch->gc_cause() == GCCause::_java_lang_system_gc && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
             !gch->incremental_collection_failed(),
             "Twice in a row");
      seen_incremental_collection_failed = false;
    }
#endif // ASSERT
  }

  if (ZapUnusedHeapArea) {
    eden()->check_mangled_unused_area_complete();
    from()->check_mangled_unused_area_complete();
    to()->check_mangled_unused_area_complete();
  }

  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }

  // update the generation and space performance counters
  update_counters();
  gch->collector_policy()->counters()->update_counters();
}

void DefNewGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  eden()->set_top_for_allocations();
  to()->set_top_for_allocations();
  from()->set_top_for_allocations();
}

void DefNewGeneration::ref_processor_init() {
  Generation::ref_processor_init();
}


void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}

void DefNewGeneration::verify() {
  eden()->verify();
  from()->verify();
  to()->verify();
}

void DefNewGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print("  eden");
  eden()->print_on(st);
  st->print("  from");
  from()->print_on(st);
  st->print("  to  ");
  to()->print_on(st);
}


const char* DefNewGeneration::name() const {
  return "def new generation";
}

// Moved from inline file as they are not called inline
CompactibleSpace* DefNewGeneration::first_compaction_space() const {
  return eden();
}

HeapWord* DefNewGeneration::allocate(size_t word_size,
                                     bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden.  If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result != NULL) {
    if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
      _next_gen->sample_eden_chunk();
    }
    return result;
  }
  do {
    HeapWord* old_limit = eden()->soft_end();
    if (old_limit < eden()->end()) {
      // Tell the next generation we reached a limit.
      HeapWord* new_limit =
        next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
      if (new_limit != NULL) {
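        // Install the new soft limit with a CAS so that allocating threads
        // racing through this slow path do not clobber one another; if the
        // CAS loses, another thread has already updated the limit and the
        // retry below will observe it.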
        Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
      } else {
        assert(eden()->soft_end() == eden()->end(),
               "invalid state after allocation_limit_reached returned null");
      }
    } else {
      // The allocation failed and the soft limit is equal to the hard limit,
      // so there is no reason to attempt another allocation.
      assert(old_limit == eden()->end(), "sanity check");
      break;
    }
    // Try to allocate until we succeed or the soft limit can't be adjusted.
    result = eden()->par_allocate(word_size);
  } while (result == NULL);

  // If the eden is full and the last collection bailed out, we are running
  // out of heap space, and we try to allocate from the from-space, too.
  // allocate_from_space can't be inlined because that would introduce a
  // circular dependency at compile time.
  if (result == NULL) {
    result = allocate_from_space(word_size);
  } else if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
    _next_gen->sample_eden_chunk();
  }
  return result;
}

HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                         bool is_tlab) {
  HeapWord* res = eden()->par_allocate(word_size);
  if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
    _next_gen->sample_eden_chunk();
  }
  return res;
}

void DefNewGeneration::gc_prologue(bool full) {
  // Ensure that _end and _soft_end are the same in eden space.
  eden()->set_soft_end(eden()->end());
}

size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}