#ifdef USE_PRAGMA_IDENT_SRC
#pragma ident "@(#)defNewGeneration.cpp 1.73 07/05/22 17:24:57 JVM"
#endif
/*
 * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_defNewGeneration.cpp.incl"

//
// DefNewGeneration functions.

// Methods of protected closure types.

DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
  assert(g->level() == 0, "Optimized for youngest gen.");
}
void DefNewGeneration::IsAliveClosure::do_object(oop p) {
  assert(false, "Do not call.");
}
bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
  return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
}
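
// Illustrative note (not from the original comments): during a scavenge an
// object in this (youngest) generation is considered live only if it has
// already been forwarded, i.e. copied to to-space or promoted.  Anything at
// or above _g->reserved().end() lies in an older generation and is
// conservatively reported as live.  Sketch, assuming 'p' points into the
// young generation:
//
//   p->is_forwarded()   do_object_b(p)
//   -----------------   --------------
//   true                true   (already evacuated, still reachable)
//   false               false  (not copied by the scavenge, hence dead)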

DefNewGeneration::KeepAliveClosure::
KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
  GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
  assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind.");
  _rs = (CardTableRS*)rs;
}

void DefNewGeneration::KeepAliveClosure::do_oop(oop* p) {
  // We never expect to see a null reference being processed
  // as a weak reference.
  assert (*p != NULL, "expected non-null ref");
  assert ((*p)->is_oop(), "expected an oop while scanning weak refs");

  _cl->do_oop_nv(p);

  // Card marking is trickier for weak refs.
  // This oop is a 'next' field which was filled in while we
  // were discovering weak references. While we might not need
  // to take a special action to keep this reference alive, we
  // will need to dirty a card as the field was modified.
  //
  // Alternatively, we could create a method which iterates through
  // each generation, allowing them in turn to examine the modified
  // field.
  //
  // We could check that p is also in an older generation, but
  // dirty cards in the youngest gen are never scanned, so the
  // extra check probably isn't worthwhile.
  if (Universe::heap()->is_in_reserved(p)) {
    _rs->inline_write_ref_field_gc(p, *p);
  }
}
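
// Example (illustrative, not part of the original source): suppose a
// Reference object lives in the old generation and its 'next' field at
// address p is updated here to keep a young object alive.  Because this
// store is performed by the GC rather than the mutator, no write barrier
// runs for it, so the card covering p is dirtied explicitly above; the next
// younger-refs scan will then still find the old->young pointer.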

DefNewGeneration::FastKeepAliveClosure::
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {
  _boundary = g->reserved().end();
}

void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p) {
  assert (*p != NULL, "expected non-null ref");
  assert ((*p)->is_oop(), "expected an oop while scanning weak refs");

  _cl->do_oop_nv(p);

  // Optimized for the case where this DefNew generation is the youngest
  // generation: we dirty a younger-gen card only if we have an
  // older->youngest generation pointer.
  if (((HeapWord*)(*p) < _boundary) && Universe::heap()->is_in_reserved(p)) {
    _rs->inline_write_ref_field_gc(p, *p);
  }
}

DefNewGeneration::EvacuateFollowersClosure::
EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                         ScanClosure* cur, ScanClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::EvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}

DefNewGeneration::FastEvacuateFollowersClosure::
FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                             DefNewGeneration* gen,
                             FastScanClosure* cur, FastScanClosure* older) :
  _gch(gch), _level(level), _gen(gen),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
  guarantee(_gen->promo_failure_scan_stack() == NULL
            || _gen->promo_failure_scan_stack()->length() == 0,
            "Failed to finish scan");
}

ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
  OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
  OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
  OopClosure(g->ref_processor()), _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}


DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   int level,
                                   const char* policy)
  : Generation(rs, initial_size, level),
    _objs_with_preserved_marks(NULL),
    _preserved_marks_of_objs(NULL),
    _promo_failure_scan_stack(NULL),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
    _eden_space = new ConcEdenSpace(this);
  } else {
    _eden_space = new EdenSpace(this);
  }
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
    vm_exit_during_initialization("Could not allocate a new gen space");

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, alignment);
  _max_eden_size = size - (2*_max_survivor_size);
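
  // Worked example (illustrative only; the actual numbers depend on the
  // flags and platform alignment): if the reserved size is 64M and
  // compute_survivor_size() returns 6M for that size and alignment, then
  // each survivor space is capped at 6M and
  // _max_eden_size = 64M - 2 * 6M = 52M.  The counters created below report
  // against these maxima even though only part of the space may be
  // committed at any given time.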

  // Allocate the performance counters.

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0);
  update_counters();
  _next_gen = NULL;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
}

void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size) {
  uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, alignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // This may happen due to 64K rounding; if so, adjust the eden size back
    // up and shrink the survivor spaces accordingly.
    minimum_eden_size = align_size_up(minimum_eden_size, alignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_size_down(maximum_survivor_size, alignment);
    survivor_size = MAX2(unaligned_survivor_size, alignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;
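
  // Resulting layout of the committed young generation (illustrative):
  //
  //   low()                                                      high()
  //   +-------------------------+---------------+---------------+
  //   |          eden           |     from      |      to       |
  //   +-------------------------+---------------+---------------+
  //   eden_start                from_start      to_start        to_end
  //
  // eden_size bytes for eden and survivor_size bytes for each survivor
  // space, as computed above.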

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start, (HeapWord*)to_end);

  eden()->initialize(edenMR, (minimum_eden_size == 0));
  // If minimum_eden_size != 0, we will not have cleared any
  // portion of eden above its top. This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && (minimum_eden_size != 0)) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, true);
    to()->initialize(toMR  , true);
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space        = to();
  _to_space          = s;
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}

bool DefNewGeneration::expand(size_t bytes) {
  MutexLocker x(ExpandHeap_lock);
  bool success = _virtual_space.expand_by(bytes);

  // Do not attempt an expand-to-the-reserve-size here.  The
  // request should properly observe the maximum size of
  // the generation, so an expand-to-reserve should be
  // unnecessary.  A second call to expand-to-reserve could
  // also cause an undue expansion: for example, if the first
  // expand fails for unknown reasons but the second succeeds,
  // the heap would grow to its maximum value.
  if (GC_locker::is_active()) {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
    }
  }

  return success;
}


void DefNewGeneration::compute_new_size() {
  // This is called after a GC that includes the next (older) generation,
  // which is required to exist, so from-space will normally be empty.
  // Note that we check both survivor spaces, since they swap roles if a
  // scavenge failed.  If either is non-empty we bail out (otherwise we
  // would have to relocate the objects).
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  int next_level = level() + 1;
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(next_level < gch->_n_gens,
         "DefNewGeneration cannot be an oldest gen");

  Generation* next_gen = gch->_gens[next_level];
  size_t old_size = next_gen->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = spec()->init_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  // Compute the desired new generation size based on NewRatio and
  // NewSizeThreadIncrease.
  size_t desired_new_size = old_size/NewRatio;
  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
  desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);

  // Clamp the new generation size to the [min_new_size, max_new_size] range.
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");
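
  // Worked example (illustrative; the actual values depend on the flags in
  // force): with a 96M old generation, NewRatio = 3, 10 non-daemon threads
  // and NewSizeThreadIncrease = 16K, the desired size starts at
  // 96M / 3 = 32M, gets 10 * 16K = 160K added for the threads, is aligned
  // up to GenGrain, and is then clamped between spec()->init_size() and the
  // reserved size.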

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
       changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false.  If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // Bail out of shrinking if there are objects in eden.
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    compute_space_boundaries(eden()->used());
    MemRegion cmr((HeapWord*)_virtual_space.low(), (HeapWord*)_virtual_space.high());
    Universe::heap()->barrier_set()->resize_covered_region(cmr);
    if (Verbose && PrintGC) {
      size_t new_size_after  = _virtual_space.committed_size();
      size_t eden_size_after = eden()->capacity();
      size_t survivor_size_after = from()->capacity();
      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden="
        SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, new_size_after/K, eden_size_after/K, survivor_size_after/K);
      if (WizardMode) {
        gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
          thread_increase_size/K, threads_count);
      }
      gclog_or_tty->cr();
    }
  }
}

void DefNewGeneration::object_iterate_since_last_GC(ObjectClosure* cl) {
  // $$$ This may be wrong in case of "scavenge failure"?
  eden()->object_iterate(cl);
}

void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
  assert(false, "NYI -- are you sure you want to call this?");
}


size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}


size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used();      // to() is only used during scavenge
}


size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}

size_t DefNewGeneration::max_capacity() const {
  const size_t alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
  const size_t reserved_bytes = reserved().byte_size();
  return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
}
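
// Note (illustrative): max_capacity() counts eden plus one survivor space,
// since only from-space ever holds objects between collections; the other
// survivor space is copy reserve.  For example, with a 64M reserved young
// generation and 6M survivor spaces this reports 64M - 6M = 58M.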

size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}

size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}

size_t DefNewGeneration::contiguous_available() const {
  return eden()->free();
}


HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }

void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}


void DefNewGeneration::space_iterate(SpaceClosure* blk,
                                     bool usedOnly) {
  blk->do_space(eden());
  blk->do_space(from());
  blk->do_space(to());
}

// The last collection bailed out because we were running out of heap
// space, so we try to allocate from from-space as well.
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
  HeapWord* result = NULL;
  if (PrintGC && Verbose) {
    gclog_or_tty->print("DefNewGeneration::allocate_from_space(%u):"
                  "  will_fail: %s"
                  "  heap_lock: %s"
                  "  free: " SIZE_FORMAT,
                  size,
               GenCollectedHeap::heap()->incremental_collection_will_fail() ? "true" : "false",
               Heap_lock->is_locked() ? "locked" : "unlocked",
               from()->free());
  }
  if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) {
    if (Heap_lock->owned_by_self() ||
        (SafepointSynchronize::is_at_safepoint() &&
         Thread::current()->is_VM_thread())) {
      // If the Heap_lock is not locked by this thread, this will be called
      // again later with the Heap_lock held.
      result = from()->allocate(size);
    } else if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  Heap_lock is not owned by self");
    }
  } else if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("  should_allocate_from_space: NOT");
  }
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("  returns %s", result == NULL ? "NULL" : "object");
  }
  return result;
}

HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
                                                bool   is_tlab,
                                                bool   parallel) {
  // We don't attempt to expand the young generation (but perhaps we should.)
  return allocate(size, is_tlab);
}


void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  _next_gen = gch->next_gen(this);
  assert(_next_gen != NULL,
    "This must be the youngest gen, and not the only gen");

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_will_fail();
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  // These can be shared for all code paths.
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear();

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  // Weak refs.
  // FIXME: Are these storage leaks, or are they resource objects?
#ifdef COMPILER2
  ReferencePolicy *soft_ref_policy = new LRUMaxHeapPolicy();
#else
  ReferencePolicy *soft_ref_policy = new LRUCurrentHeapPolicy();
#endif // COMPILER2

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  gch->gen_process_strong_roots(_level,
                                true, // Process younger gens, if any, as
                                      // strong roots.
                                false,// not collecting permanent generation.
                                SharedHeap::SO_AllClasses,
                                &fsc_with_gc_barrier,
                                &fsc_with_no_gc_barrier);

  // "evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ref_processor()->process_discovered_references(
    soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers, NULL);
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear();
    from()->clear();
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    // Set the desired survivor size to half the real survivor space
    _tenuring_threshold =
      age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
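
    // Illustrative note (a sketch of the age-table policy, not a guarantee
    // of the exact implementation): compute_tenuring_threshold() is given
    // the survivor capacity in words and picks the smallest object age at
    // which the cumulative surviving bytes would exceed the desired survivor
    // occupancy, capping the result at MaxTenuringThreshold.  For example,
    // if ages 1-3 fit within the target but adding age 4 would overflow it,
    // the threshold becomes 4 and older objects are promoted directly in the
    // next scavenge.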

    if (PrintGC && !PrintGCDetails) {
      gch->print_heap_change(gch_prev_used);
    }
  } else {
    assert(HandlePromotionFailure,
      "Should not be here unless promotion failure handling is on");
    assert(_promo_failure_scan_stack != NULL &&
      _promo_failure_scan_stack->length() == 0, "post condition");

    // Deallocate the scan stack and its elements.
    delete _promo_failure_scan_stack;
    _promo_failure_scan_stack = NULL;

    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed)");
    }
    // Add to-space to the list of spaces to compact
    // when a promotion failure has occurred.  In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For the sake of uniformity wrt ParNewGeneration::collect().
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_will_fail();

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  // Set a new iteration-safe limit for the survivor spaces.
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());
  SpecializationStats::print();
  update_time_of_last_gc(os::javaTimeMillis());
}

class RemoveForwardPointerClosure: public ObjectClosure {
public:
  void do_object(oop obj) {
    obj->init_mark();
  }
};

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::remove_forwarding_pointers() {
  RemoveForwardPointerClosure rspc;
  eden()->object_iterate(&rspc);
  from()->object_iterate(&rspc);
  // Now restore saved marks, if any.
  if (_objs_with_preserved_marks != NULL) {
    assert(_preserved_marks_of_objs != NULL, "Both or none.");
    assert(_objs_with_preserved_marks->length() ==
           _preserved_marks_of_objs->length(), "Both or none.");
    for (int i = 0; i < _objs_with_preserved_marks->length(); i++) {
      oop obj   = _objs_with_preserved_marks->at(i);
      markOop m = _preserved_marks_of_objs->at(i);
      obj->set_mark(m);
    }
    delete _objs_with_preserved_marks;
    delete _preserved_marks_of_objs;
    _objs_with_preserved_marks = NULL;
    _preserved_marks_of_objs = NULL;
  }
}

void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    if (_objs_with_preserved_marks == NULL) {
      assert(_preserved_marks_of_objs == NULL, "Both or none.");
      _objs_with_preserved_marks = new (ResourceObj::C_HEAP)
        GrowableArray<oop>(PreserveMarkStackSize, true);
      _preserved_marks_of_objs = new (ResourceObj::C_HEAP)
        GrowableArray<markOop>(PreserveMarkStackSize, true);
    }
    _objs_with_preserved_marks->push(obj);
    _preserved_marks_of_objs->push(m);
  }
}

void DefNewGeneration::handle_promotion_failure(oop old) {
  preserve_mark_if_necessary(old, old->mark());
  // Forward the object to itself.
  old->forward_to(old);
  _promotion_failed = true;

  push_on_promo_failure_scan_stack(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}

oop DefNewGeneration::copy_to_survivor_space(oop old, oop* from) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old).
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate(s);
  }

  // Otherwise try allocating obj tenured.
  if (obj == NULL) {
    obj = _next_gen->promote(old, s, from);
    if (obj == NULL) {
      if (!HandlePromotionFailure) {
        // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
        // is incorrectly set. In any case, it's seriously wrong to be here!
        vm_exit_out_of_memory(s*wordSize, "promotion");
      }

      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done; install a forwarding pointer to obj in old's header.
  old->forward_to(obj);

  return obj;
}
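
// In outline (descriptive note, not from the original source), a scavenged
// object takes one of three paths through copy_to_survivor_space():
//   1. age < tenuring_threshold() and to-space has room  -> copied to
//      to-space and its age incremented;
//   2. otherwise                                         -> promoted into the
//      next generation via _next_gen->promote();
//   3. promotion fails and HandlePromotionFailure is set -> the object is
//      forwarded to itself and queued for later rescanning.
// In every case the old copy's mark word ends up holding a forwarding
// pointer when this method returns.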

void DefNewGeneration::push_on_promo_failure_scan_stack(oop obj) {
  if (_promo_failure_scan_stack == NULL) {
    _promo_failure_scan_stack = new (ResourceObj::C_HEAP)
                                    GrowableArray<oop>(40, true);
  }

  _promo_failure_scan_stack->push(obj);
}

void DefNewGeneration::drain_promo_failure_scan_stack() {
  assert(_promo_failure_scan_stack != NULL, "precondition");

  while (_promo_failure_scan_stack->length() > 0) {
    oop obj = _promo_failure_scan_stack->pop();
    obj->oop_iterate(_promo_failure_scan_stack_closure);
  }
}

void DefNewGeneration::save_marks() {
  eden()->set_saved_mark();
  to()->set_saved_mark();
  from()->set_saved_mark();
}


void DefNewGeneration::reset_saved_marks() {
  eden()->reset_saved_mark();
  to()->reset_saved_mark();
  from()->reset_saved_mark();
}


bool DefNewGeneration::no_allocs_since_save_marks() {
  assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
  assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
  return to()->saved_mark_at_top();
}

#define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)  \
                                                                 \
void DefNewGeneration::                                          \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {    \
  cl->set_generation(this);                                      \
  eden()->oop_since_save_marks_iterate##nv_suffix(cl);           \
  to()->oop_since_save_marks_iterate##nv_suffix(cl);             \
  from()->oop_since_save_marks_iterate##nv_suffix(cl);           \
  cl->reset_generation();                                        \
  save_marks();                                                  \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)

#undef DefNew_SINCE_SAVE_MARKS_DEFN

void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                          size_t max_alloc_words) {
  if (requestor == this || _promotion_failed) return;
  assert(requestor->level() > level(), "DefNewGeneration must be youngest");

  /* $$$ Assert this?  "trace" is a "MarkSweep" function so that's not appropriate.
  if (to_space->top() > to_space->bottom()) {
    trace("to_space not empty when contribute_scratch called");
  }
  */

  ContiguousSpace* to_space = to();
  assert(to_space->end() >= to_space->top(), "pointers out of order");
  size_t free_words = pointer_delta(to_space->end(), to_space->top());
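  // Example (illustrative): if to-space is 6M and empty, free_words is
  // roughly 6M / HeapWordSize; provided that exceeds MinFreeScratchWords,
  // a ScratchBlock header is laid down at to_space->top() below and the
  // block is linked onto the caller's list.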
  if (free_words >= MinFreeScratchWords) {
    ScratchBlock* sb = (ScratchBlock*)to_space->top();
    sb->num_words = free_words;
    sb->next = list;
    list = sb;
  }
}

bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    return false;
  }
  if (_next_gen == NULL) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    _next_gen = gch->next_gen(this);
    assert(_next_gen != NULL,
           "This must be the youngest gen, and not the only gen");
  }

  // Decide if there's enough room for a full promotion
  // When using extremely large edens, we effectively lose a
  // large amount of old space.  Use the "MaxLiveObjectEvacuationRatio"
  // flag to reduce the minimum evacuation space requirements. If
  // there is not enough space to evacuate eden during a scavenge,
  // the VM will immediately exit with an out of memory error.
  // This flag has not been tested
  // with collectors other than simple mark & sweep.
  //
  // Note that with the addition of promotion failure handling, the
  // VM will not immediately exit but will undo the young generation
  // collection.  The parameter is left here for compatibility.
  const double evacuation_ratio = MaxLiveObjectEvacuationRatio / 100.0;

  // worst_case_evacuation is based on "used()".  For the case where this
  // method is called after a collection, this is still appropriate because
  // the case that needs to be detected is one in which a full collection
  // has been done and has overflowed into the young generation.  In that
  // case a minor collection will fail (the overflow of the full collection
  // means there is no space in the old generation for any promotion).
  size_t worst_case_evacuation = (size_t)(used() * evacuation_ratio);
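
  // Worked example (illustrative): with 30M currently used in the young
  // generation and MaxLiveObjectEvacuationRatio = 100 (i.e. assume every
  // byte may survive), worst_case_evacuation is 30M, and the collection is
  // attempted only if the next generation reports that promoting 30M is
  // safe (subject to HandlePromotionFailure).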

  return _next_gen->promotion_attempt_is_safe(worst_case_evacuation,
                                              HandlePromotionFailure);
}

void DefNewGeneration::gc_epilogue(bool full) {
  // Check whether the heap is approaching full after a collection has
  // been done.  Generally the young generation is empty at the end of
  // a collection; if it is not, then the heap is approaching full.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  clear_should_allocate_from_space();
  if (collection_attempt_is_safe()) {
    gch->clear_incremental_collection_will_fail();
  } else {
    gch->set_incremental_collection_will_fail();
    if (full) { // we seem to be running out of space
      set_should_allocate_from_space();
    }
  }

  // Update the generation and space performance counters.
  update_counters();
  gch->collector_policy()->counters()->update_counters();
}

void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}

void DefNewGeneration::verify(bool allow_dirty) {
  eden()->verify(allow_dirty);
  from()->verify(allow_dirty);
    to()->verify(allow_dirty);
}

void DefNewGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print("  eden");
  eden()->print_on(st);
  st->print("  from");
  from()->print_on(st);
  st->print("  to  ");
  to()->print_on(st);
}


const char* DefNewGeneration::name() const {
  return "def new generation";
}