/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/serial/defNewGeneration.inline.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/ageTable.inline.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/cms/parOopClosures.hpp"
#endif

//
// DefNewGeneration functions.

// Methods of protected closure types.

DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* young_gen) : _young_gen(young_gen) {
  assert(_young_gen->kind() == Generation::ParNew ||
         _young_gen->kind() == Generation::DefNew, "Expected the young generation here");
}

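// An object is considered alive if it lies at or above the end of the young
// generation's reserved space (i.e. outside this generation) or if it has
// already been forwarded (copied) by the current scavenge.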
bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
  return (HeapWord*)p >= _young_gen->reserved().end() || p->is_forwarded();
}

DefNewGeneration::KeepAliveClosure::
KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
  _rs = GenCollectedHeap::heap()->rem_set();
}

void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }


DefNewGeneration::FastKeepAliveClosure::
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {
  _boundary = g->reserved().end();
}

void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }

DefNewGeneration::EvacuateFollowersClosure::
EvacuateFollowersClosure(GenCollectedHeap* gch,
                         ScanClosure* cur,
                         ScanClosure* older) :
  _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
{}

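// Transitively process "followers": repeatedly scan the oops in objects that
// were copied or promoted since the last save_marks() until no new
// allocations appear, i.e. the transitive closure of evacuation is complete.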
void DefNewGeneration::EvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen, _scan_cur_or_nonheap, _scan_older);
  } while (!_gch->no_allocs_since_save_marks());
}

DefNewGeneration::FastEvacuateFollowersClosure::
FastEvacuateFollowersClosure(GenCollectedHeap* gch,
                             FastScanClosure* cur,
                             FastScanClosure* older) :
  _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
{
  assert(_gch->young_gen()->kind() == Generation::DefNew, "Generation should be DefNew");
  _young_gen = (DefNewGeneration*)_gch->young_gen();
}

void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen, _scan_cur_or_nonheap, _scan_older);
  } while (!_gch->no_allocs_since_save_marks());
  guarantee(_young_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
}

ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
    OopsInClassLoaderDataOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  _boundary = _g->reserved().end();
}

void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }

FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
    OopsInClassLoaderDataOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  _boundary = _g->reserved().end();
}

void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }

void CLDScanClosure::do_cld(ClassLoaderData* cld) {
  NOT_PRODUCT(ResourceMark rm);
  log_develop_trace(gc, scavenge)("CLDScanClosure::do_cld " PTR_FORMAT ", %s, dirty: %s",
                                  p2i(cld),
                                  cld->loader_name(),
                                  cld->has_modified_oops() ? "true" : "false");

  // If the cld has not been dirtied we know that there are
  // no references into the young gen and we can skip it.
  if (cld->has_modified_oops()) {
    if (_accumulate_modified_oops) {
      cld->accumulate_modified_oops();
    }

    // Tell the closure which CLD is being scanned so that it can be dirtied
    // if oops are left pointing into the young gen.
    _scavenge_closure->set_scanned_cld(cld);

    // Clean the cld since we're going to scavenge all the metadata.
    cld->oops_do(_scavenge_closure, false, /*clear_modified_oops*/true);

    _scavenge_closure->set_scanned_cld(NULL);
  }
}

ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
  _g(g)
{
  _boundary = _g->reserved().end();
}

void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }

void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }

DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   const char* policy)
  : Generation(rs, initial_size),
    _preserved_marks_set(false /* in_c_heap */),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  gch->rem_set()->resize_covered_region(cmr);

  _eden_space = new ContiguousSpace();
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
    vm_exit_during_initialization("Could not allocate a new gen space");
  }

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx alignment = gch->collector_policy()->space_alignment();
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, alignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters
  GenCollectorPolicy* gcp = gch->gen_policy();

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3,
      gcp->min_young_size(), gcp->max_young_size(), &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _old_gen = NULL;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;

  _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
}

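// Carve the committed part of the virtual space into eden, from, and to:
// the survivor size comes from compute_survivor_size() and the remainder is
// given to eden, after which each space is (re)initialized over its new
// boundaries.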
void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  uintx alignment =
    GenCollectedHeap::heap()->collector_policy()->space_alignment();

  // If the spaces are being cleared (only done at heap initialization
  // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces
  // so check.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
    "Initialization of the survivor spaces assumes these are empty");

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, alignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding, if so adjust eden size back up
    minimum_eden_size = align_up(minimum_eden_size, alignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_down(maximum_survivor_size, alignment);
    survivor_size = MAX2(unaligned_survivor_size, alignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned(eden_start), "checking alignment");
  assert(Space::is_aligned(from_start), "checking alignment");
  assert(Space::is_aligned(to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start, (HeapWord*)to_end);

  // A minimum eden size implies that there is a part of eden that
  // is being used and that affects the initialization of any
  // newly formed eden.
  bool live_in_eden = minimum_eden_size > 0;

  // If not clearing the spaces, do some checking to verify that
  // the spaces are already mangled.
  if (!clear_space) {
    // Must check mangling before the spaces are reshaped.  Otherwise,
    // the bottom or end of one space may have moved into another;
    // a failure of the check may then not correctly indicate which space
    // is not properly mangled.
    if (ZapUnusedHeapArea) {
      HeapWord* limit = (HeapWord*) _virtual_space.high();
      eden()->check_mangled_unused_area(limit);
      from()->check_mangled_unused_area(limit);
      to()->check_mangled_unused_area(limit);
    }
  }

  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
  // If clear_space and live_in_eden, we will not have cleared any
  // portion of eden above its top. This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, clear_space, mangle_space);
  to()->initialize(toMR, clear_space, mangle_space);

  // Set next compaction spaces.
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);
}

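// Exchange the roles of from-space and to-space (and, when performance data
// is enabled, their space counters). After a successful scavenge the previous
// from-space becomes the new, empty to-space.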
void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space        = to();
  _to_space          = s;
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}

bool DefNewGeneration::expand(size_t bytes) {
  MutexLocker x(ExpandHeap_lock);
  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
  bool success = _virtual_space.expand_by(bytes);
  if (success && ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    HeapWord* new_high = (HeapWord*) _virtual_space.high();
    MemRegion mangle_region(prev_high, new_high);
    SpaceMangler::mangle_region(mangle_region);
  }

  // Do not attempt an expand-to-the-reserve size.  The request should
  // properly observe the maximum size of the generation, so an
  // expand-to-reserve should be unnecessary.  Also, a second call to
  // expand-to-reserve can potentially cause an undue expansion: for
  // example, the first expand may fail for unknown reasons, but the
  // second succeeds and expands the heap to its maximum value.
  if (GCLocker::is_active()) {
    log_debug(gc)("Garbage collection disabled, expanded heap instead");
  }

  return success;
}

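// Grow the candidate young generation size by NewSizeThreadIncrease bytes per
// non-daemon thread, checking each arithmetic step (multiplication, addition,
// alignment) for overflow. If any step would overflow, the unadjusted
// new_size_before is returned instead.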
size_t DefNewGeneration::adjust_for_thread_increase(size_t new_size_candidate,
                                                    size_t new_size_before,
                                                    size_t alignment) const {
  size_t desired_new_size = new_size_before;

  if (NewSizeThreadIncrease > 0) {
    int threads_count;
    size_t thread_increase_size = 0;

    // 1. Check an overflow at 'threads_count * NewSizeThreadIncrease'.
    threads_count = Threads::number_of_non_daemon_threads();
    if (threads_count > 0 && NewSizeThreadIncrease <= max_uintx / threads_count) {
      thread_increase_size = threads_count * NewSizeThreadIncrease;

      // 2. Check an overflow at 'new_size_candidate + thread_increase_size'.
      if (new_size_candidate <= max_uintx - thread_increase_size) {
        new_size_candidate += thread_increase_size;

        // 3. Check an overflow at 'align_up'.
        size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1));
        if (new_size_candidate <= aligned_max) {
          desired_new_size = align_up(new_size_candidate, alignment);
        }
      }
    }
  }

  return desired_new_size;
}

void DefNewGeneration::compute_new_size() {
  // This is called after a GC that includes the old generation, so from-space
  // will normally be empty.
  // Note that we check both spaces, since if scavenge failed they revert roles.
  // If not we bail out (otherwise we would have to relocate the objects).
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  size_t old_size = gch->old_gen()->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = initial_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  int threads_count = 0;
  size_t thread_increase_size = 0;

  size_t new_size_candidate = old_size / NewRatio;
  // Compute the desired new generation size based on NewRatio and
  // NewSizeThreadIncrease, reverting to the previous value if any overflow happens.
  size_t desired_new_size = adjust_for_thread_increase(new_size_candidate, new_size_before, alignment);

  // Adjust new generation size
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
       changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false.  If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // Only shrink when eden is empty; bail out of shrinking if there are objects in eden.
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    gch->rem_set()->resize_covered_region(cmr);

    log_debug(gc, ergo, heap)(
        "New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden=" SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, _virtual_space.committed_size()/K,
        eden()->capacity()/K, from()->capacity()/K);
    log_trace(gc, ergo, heap)(
        "  [allowed " SIZE_FORMAT "K extra for %d threads]",
        thread_increase_size/K, threads_count);
  }
}

void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl, uint n_threads) {
  assert(false, "NYI -- are you sure you want to call this?");
}


size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}


size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used();      // to() is only used during scavenge
}


size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}

size_t DefNewGeneration::max_capacity() const {
  const size_t alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
  const size_t reserved_bytes = reserved().byte_size();
  return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
}

size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}

size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}

size_t DefNewGeneration::contiguous_available() const {
  return eden()->free();
}


HeapWord* volatile* DefNewGeneration::top_addr() const { return eden()->top_addr(); }
HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }

void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}


void DefNewGeneration::space_iterate(SpaceClosure* blk,
                                     bool usedOnly) {
  blk->do_space(eden());
  blk->do_space(from());
  blk->do_space(to());
}

// The last collection bailed out, we are running out of heap space,
// so we try to allocate from the from-space, too.
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
  bool should_try_alloc = should_allocate_from_space() || GCLocker::is_active_and_needs_gc();

  // If the Heap_lock is not locked by this thread, this will be called
  // again later with the Heap_lock held.
  bool do_alloc = should_try_alloc && (Heap_lock->owned_by_self() || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()));

  HeapWord* result = NULL;
  if (do_alloc) {
    result = from()->allocate(size);
  }

  log_trace(gc, alloc)("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "):  will_fail: %s  heap_lock: %s  free: " SIZE_FORMAT "%s%s returns %s",
                        size,
                        GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
                          "true" : "false",
                        Heap_lock->is_locked() ? "locked" : "unlocked",
                        from()->free(),
                        should_try_alloc ? "" : "  should_allocate_from_space: NOT",
                        do_alloc ? "" : "  Heap_lock is not owned by self",
                        result == NULL ? "NULL" : "object");

  return result;
}

HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
                                                bool   is_tlab,
                                                bool   parallel) {
  // We don't attempt to expand the young generation (but perhaps we should.)
  return allocate(size, is_tlab);
}

void DefNewGeneration::adjust_desired_tenuring_threshold() {
  // Set the desired survivor size to TargetSurvivorRatio percent (half by default) of the real survivor space
  size_t const survivor_capacity = to()->capacity() / HeapWordSize;
  size_t const desired_survivor_size = (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);

  _tenuring_threshold = age_table()->compute_tenuring_threshold(desired_survivor_size);

  if (UsePerfData) {
    GCPolicyCounters* gc_counters = GenCollectedHeap::heap()->counters();
    gc_counters->tenuring_threshold()->set_value(_tenuring_threshold);
    gc_counters->desired_survivor_size()->set_value(desired_survivor_size * oopSize);
  }

  age_table()->print_age_table(_tenuring_threshold);
}

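// Do a copying collection of the young generation: copy live young objects
// reachable from roots and from old-to-young references into to-space, or
// promote them to the old generation, process discovered references and weak
// oops, and swap the survivor spaces. On promotion failure the forwarding
// pointers are removed again and the failure is recorded so that the next
// collection cycle can react.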
void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start();
  DefNewTracer gc_tracer;
  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());

  _old_gen = gch->old_gen();

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    log_trace(gc)(":: Collection attempt not safe ::");
    gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  GCTraceTime(Trace, gc, phases) tm("DefNew", NULL, gch->gc_cause());

  gch->trace_heap_before_gc(&gc_tracer);

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);
  // The preserved marks should be empty at the start of the GC.
  _preserved_marks_set.init(1);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(),
         "save marks have not been newly set.");

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  CLDScanClosure cld_scan_closure(&fsc_with_no_gc_barrier,
                                  gch->rem_set()->cld_rem_set()->accumulate_modified_oops());

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(),
         "save marks have not been newly set.");

  {
    // DefNew needs to run with n_threads == 0, to make sure the serial
    // version of the card table scanning code is used.
    // See: CardTableRS::non_clean_card_iterate_possibly_parallel.
    StrongRootsScope srs(0);

    gch->young_process_roots(&srs,
                             &fsc_with_no_gc_barrier,
                             &fsc_with_gc_barrier,
                             &cld_scan_closure);
  }

  // "evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  ReferenceProcessorPhaseTimes pt(_gc_timer, rp->num_q());
  const ReferenceProcessorStats& stats =
  rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                    NULL, &pt);
  gc_tracer.report_gc_reference_stats(stats);
  gc_tracer.report_tenuring_threshold(tenuring_threshold());
  pt.print_all_references();

  assert(gch->no_allocs_since_save_marks(), "save marks have not been newly set.");

  WeakProcessor::weak_oops_do(&is_alive, &keep_alive);

  // Verify that the usage of keep_alive didn't copy any objects.
  assert(gch->no_allocs_since_save_marks(), "save marks have not been newly set.");

  if (!_promotion_failed) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a young collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();

    // A successful scavenge should restart the GC time limit count, which is
    // used for full GCs.
    AdaptiveSizePolicy* size_policy = gch->size_policy();
    size_policy->reset_gc_overhead_limit_count();
    assert(!gch->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    log_info(gc, promotion)("Promotion failed");
    // Add to-space to the list of spaces to compact
    // when a promotion failure has occurred.  In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_failed();

    // Inform the next generation that a promotion failure occurred.
    _old_gen->promotion_failure_occurred();
    gc_tracer.report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(gch->reset_promotion_should_fail();)
  }
  // We should have processed and cleared all the preserved marks.
  _preserved_marks_set.reclaim();
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());

  // We need to use a monotonically non-decreasing time in ms, or we will see
  // time-warp warnings; os::javaTimeMillis() does not guarantee monotonicity,
  // so derive the time from os::javaTimeNanos() instead.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  gch->trace_heap_after_gc(&gc_tracer);

  _gc_timer->register_gc_end();

  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  _promotion_failed_info.reset();
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::remove_forwarding_pointers() {
  RemoveForwardedPointerClosure rspc;
  eden()->object_iterate(&rspc);
  from()->object_iterate(&rspc);
  restore_preserved_marks();
}

void DefNewGeneration::restore_preserved_marks() {
  SharedRestorePreservedMarksTaskExecutor task_executor(NULL);
  _preserved_marks_set.restore(&task_executor);
}

void DefNewGeneration::handle_promotion_failure(oop old) {
  log_debug(gc, promotion)("Promotion failure size = %d) ", old->size());

  _promotion_failed = true;
  _promotion_failed_info.register_copy_failure(old->size());
  _preserved_marks_set.get()->push_if_necessary(old, old->mark());
  // forward to self
  old->forward_to(old);

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}

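// Copy a live young object into to-space if it is below the tenuring
// threshold; otherwise (or if the to-space allocation fails) promote it to
// the old generation. On promotion failure the object is forwarded to itself
// and queued on the promotion-failure scan stack for later processing.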
oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate_aligned(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _old_gen->promote(old, s);
    if (obj == NULL) {
      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  return obj;
}

void DefNewGeneration::drain_promo_failure_scan_stack() {
  while (!_promo_failure_scan_stack.is_empty()) {
    oop obj = _promo_failure_scan_stack.pop();
    obj->oop_iterate(_promo_failure_scan_stack_closure);
  }
}

void DefNewGeneration::save_marks() {
  eden()->set_saved_mark();
  to()->set_saved_mark();
  from()->set_saved_mark();
}


void DefNewGeneration::reset_saved_marks() {
  eden()->reset_saved_mark();
  to()->reset_saved_mark();
  from()->reset_saved_mark();
}


bool DefNewGeneration::no_allocs_since_save_marks() {
  assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
  assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
  return to()->saved_mark_at_top();
}

#define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
                                                                \
void DefNewGeneration::                                         \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
  cl->set_generation(this);                                     \
  eden()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  to()->oop_since_save_marks_iterate##nv_suffix(cl);            \
  from()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  cl->reset_generation();                                       \
  save_marks();                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)

#undef DefNew_SINCE_SAVE_MARKS_DEFN

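// Offer the unused tail of to-space as a scratch block to an older
// generation's collector. Nothing is contributed if the request comes from
// this generation itself, if promotion failed (to-space may then contain
// live objects), or if the free tail is smaller than MinFreeScratchWords.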
void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                         size_t max_alloc_words) {
  if (requestor == this || _promotion_failed) {
    return;
  }
  assert(GenCollectedHeap::heap()->is_old_gen(requestor), "We should not call our own generation");

  /* $$$ Assert this?  "trace" is a "MarkSweep" function so that's not appropriate.
  if (to_space->top() > to_space->bottom()) {
    trace("to_space not empty when contribute_scratch called");
  }
  */

  ContiguousSpace* to_space = to();
  assert(to_space->end() >= to_space->top(), "pointers out of order");
  size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    ScratchBlock* sb = (ScratchBlock*)to_space->top();
    sb->num_words = free_words;
    sb->next = list;
    list = sb;
  }
}

void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of
  // to_space if ZapUnusedHeapArea.  This is needed because
  // top is not maintained while using to-space as scratch.
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area_complete();
  }
}

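// A young collection is only attempted when to-space is empty and the old
// generation reports that it could absorb a worst-case promotion of all
// currently used young generation space.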
bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    log_trace(gc)(":: to is not empty ::");
    return false;
  }
  if (_old_gen == NULL) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    _old_gen = gch->old_gen();
  }
  return _old_gen->promotion_attempt_is_safe(used());
}

void DefNewGeneration::gc_epilogue(bool full) {
  DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)

  assert(!GCLocker::is_active(), "We should not be executing here");
  // Check if the heap is approaching full after a collection has
  // been done.  Generally the young generation is empty (at a minimum)
  // at the end of a collection.  If it is not, then the heap is
  // approaching full.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (full) {
    DEBUG_ONLY(seen_incremental_collection_failed = false;)
    if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
      log_trace(gc)("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
      set_should_allocate_from_space(); // we seem to be running out of space
    } else {
      log_trace(gc)("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      gch->clear_incremental_collection_failed(); // We just did a full collection
      clear_should_allocate_from_space(); // if set
    }
  } else {
#ifdef ASSERT
    // It is possible that incremental_collection_failed() == true
    // here, because an attempted scavenge did not succeed. The policy
    // is normally expected to cause a full collection which should
    // clear that condition, so we should not be here twice in a row
    // with incremental_collection_failed() == true without having done
    // a full collection in between.
    if (!seen_incremental_collection_failed &&
        gch->incremental_collection_failed()) {
      log_trace(gc)("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
                            GCCause::to_string(gch->gc_cause()));
      seen_incremental_collection_failed = true;
    } else if (seen_incremental_collection_failed) {
      log_trace(gc)("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
                            GCCause::to_string(gch->gc_cause()));
      assert(gch->gc_cause() == GCCause::_scavenge_alot ||
             (GCCause::is_user_requested_gc(gch->gc_cause()) && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
             !gch->incremental_collection_failed(),
             "Twice in a row");
      seen_incremental_collection_failed = false;
    }
#endif // ASSERT
  }

  if (ZapUnusedHeapArea) {
    eden()->check_mangled_unused_area_complete();
    from()->check_mangled_unused_area_complete();
    to()->check_mangled_unused_area_complete();
  }

  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }

  // update the generation and space performance counters
  update_counters();
  gch->counters()->update_counters();
}

void DefNewGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  eden()->set_top_for_allocations();
  to()->set_top_for_allocations();
  from()->set_top_for_allocations();
}

void DefNewGeneration::ref_processor_init() {
  Generation::ref_processor_init();
}


void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}

void DefNewGeneration::verify() {
  eden()->verify();
  from()->verify();
  to()->verify();
}

void DefNewGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print("  eden");
  eden()->print_on(st);
  st->print("  from");
  from()->print_on(st);
  st->print("  to  ");
  to()->print_on(st);
}


const char* DefNewGeneration::name() const {
  return "def new generation";
}

// Moved from inline file as they are not called inline
CompactibleSpace* DefNewGeneration::first_compaction_space() const {
  return eden();
}

HeapWord* DefNewGeneration::allocate(size_t word_size, bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden.  If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result != NULL) {
    if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
      _old_gen->sample_eden_chunk();
    }
  } else {
    // If the eden is full and the last collection bailed out, we are running
    // out of heap space, and we try to allocate from the from-space, too.
    // allocate_from_space can't be inlined because that would introduce a
    // circular dependency at compile time.
    result = allocate_from_space(word_size);
  }
  return result;
}

HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                         bool is_tlab) {
  HeapWord* res = eden()->par_allocate(word_size);
  if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
    _old_gen->sample_eden_chunk();
  }
  return res;
}

size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}

size_t DefNewGeneration::tlab_used() const {
  return eden()->used();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}