src/share/vm/memory/defNewGeneration.cpp

rev 4773 : 8005849: JEP 167: Event-Based JVM Tracing
Reviewed-by: acorn, coleenp, sla
Contributed-by: Karen Kinnear <karen.kinnear@oracle.com>, Bengt Rutisson <bengt.rutisson@oracle.com>, Calvin Cheung <calvin.cheung@oracle.com>, Erik Gahlin <erik.gahlin@oracle.com>, Erik Helin <erik.helin@oracle.com>, Jesper Wilhelmsson <jesper.wilhelmsson@oracle.com>, Keith McGuigan <keith.mcguigan@oracle.com>, Mattias Tobiasson <mattias.tobiasson@oracle.com>, Markus Gronlund <markus.gronlund@oracle.com>, Mikael Auno <mikael.auno@oracle.com>, Nils Eliasson <nils.eliasson@oracle.com>, Nils Loodin <nils.loodin@oracle.com>, Rickard Backman <rickard.backman@oracle.com>, Staffan Larsen <staffan.larsen@oracle.com>, Stefan Karlsson <stefan.karlsson@oracle.com>, Yekaterina Kantserova <yekaterina.kantserova@oracle.com>
   1 /*
   2  * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/shared/collectorCounters.hpp"
  27 #include "gc_implementation/shared/gcPolicyCounters.hpp"




  28 #include "gc_implementation/shared/spaceDecorator.hpp"
  29 #include "memory/defNewGeneration.inline.hpp"
  30 #include "memory/gcLocker.inline.hpp"
  31 #include "memory/genCollectedHeap.hpp"
  32 #include "memory/genOopClosures.inline.hpp"
  33 #include "memory/genRemSet.hpp"
  34 #include "memory/generationSpec.hpp"
  35 #include "memory/iterator.hpp"
  36 #include "memory/referencePolicy.hpp"
  37 #include "memory/space.inline.hpp"
  38 #include "oops/instanceRefKlass.hpp"
  39 #include "oops/oop.inline.hpp"
  40 #include "runtime/java.hpp"
  41 #include "runtime/thread.inline.hpp"
  42 #include "utilities/copy.hpp"
  43 #include "utilities/stack.inline.hpp"
  44 
  45 //
  46 // DefNewGeneration functions.
  47 


 206   _max_eden_size = size - (2*_max_survivor_size);
 207 
 208   // allocate the performance counters
 209 
 210   // Generation counters -- generation 0, 3 subspaces
 211   _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
 212   _gc_counters = new CollectorCounters(policy, 0);
 213 
 214   _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
 215                                       _gen_counters);
 216   _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
 217                                       _gen_counters);
 218   _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
 219                                     _gen_counters);
 220 
 221   compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
 222   update_counters();
 223   _next_gen = NULL;
 224   _tenuring_threshold = MaxTenuringThreshold;
 225   _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;


 226 }
 227 
 228 void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
 229                                                 bool clear_space,
 230                                                 bool mangle_space) {
 231   uintx alignment =
 232     GenCollectedHeap::heap()->collector_policy()->min_alignment();
 233 
 234   // If the spaces are being cleared (only done at heap initialization
 235   // currently), the survivor spaces need not be empty.
 236   // Otherwise, used areas in the survivor spaces are not dealt with,
 237   // so check that they are empty.
 238   assert(clear_space || (to()->is_empty() && from()->is_empty()),
 239     "Initialization of the survivor spaces assumes these are empty");
 240 
 241   // Compute sizes
 242   uintx size = _virtual_space.committed_size();
 243   uintx survivor_size = compute_survivor_size(size, alignment);
 244   uintx eden_size = size - (2*survivor_size);
 245   assert(eden_size > 0 && survivor_size <= eden_size, "just checking");


 541 }
 542 
 543 HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
 544                                                 bool   is_tlab,
 545                                                 bool   parallel) {
 546   // We don't attempt to expand the young generation (but perhaps we should.)
 547   return allocate(size, is_tlab);
 548 }
 549 
 550 void DefNewGeneration::adjust_desired_tenuring_threshold() {
 551   // Set the desired survivor size to half the real survivor space
 552   _tenuring_threshold =
 553     age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
 554 }
 555 
 556 void DefNewGeneration::collect(bool   full,
 557                                bool   clear_all_soft_refs,
 558                                size_t size,
 559                                bool   is_tlab) {
 560   assert(full || size > 0, "otherwise we don't want to collect");

 561   GenCollectedHeap* gch = GenCollectedHeap::heap();





 562   _next_gen = gch->next_gen(this);
 563   assert(_next_gen != NULL,
 564     "This must be the youngest gen, and not the only gen");
 565 
 566   // If the next generation is too full to accomodate promotion
 567   // from this generation, pass on collection; let the next generation
 568   // do it.
 569   if (!collection_attempt_is_safe()) {
 570     if (Verbose && PrintGCDetails) {
 571       gclog_or_tty->print(" :: Collection attempt not safe :: ");
 572     }
 573     gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
 574     return;
 575   }
 576   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
 577 
 578   init_assuming_no_promotion_failure();
 579 
 580   TraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, gclog_or_tty);
 581   // Capture heap used before collection (for printing).
 582   size_t gch_prev_used = gch->used();
 583 


 584   SpecializationStats::clear();
 585 
 586   // These can be shared for all code paths
 587   IsAliveClosure is_alive(this);
 588   ScanWeakRefClosure scan_weak_ref(this);
 589 
 590   age_table()->clear();
 591   to()->clear(SpaceDecorator::Mangle);
 592 
 593   gch->rem_set()->prepare_for_younger_refs_iterate(false);
 594 
 595   assert(gch->no_allocs_since_save_marks(0),
 596          "save marks have not been newly set.");
 597 
 598   // Not very pretty.
 599   CollectorPolicy* cp = gch->collector_policy();
 600 
 601   FastScanClosure fsc_with_no_gc_barrier(this, false);
 602   FastScanClosure fsc_with_gc_barrier(this, true);
 603 


 614 
 615   int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
 616 
 617   gch->gen_process_strong_roots(_level,
 618                                 true,  // Process younger gens, if any,
 619                                        // as strong roots.
 620                                 true,  // activate StrongRootsScope
 621                                 true,  // is scavenging
 622                                 SharedHeap::ScanningOption(so),
 623                                 &fsc_with_no_gc_barrier,
 624                                 true,   // walk *all* scavengable nmethods
 625                                 &fsc_with_gc_barrier,
 626                                 &klass_scan_closure);
 627 
 628   // "evacuate followers".
 629   evacuate_followers.do_void();
 630 
 631   FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
 632   ReferenceProcessor* rp = ref_processor();
 633   rp->setup_policy(clear_all_soft_refs);

 634   rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
 635                                     NULL);
 636   if (!promotion_failed()) {


 637     // Swap the survivor spaces.
 638     eden()->clear(SpaceDecorator::Mangle);
 639     from()->clear(SpaceDecorator::Mangle);
 640     if (ZapUnusedHeapArea) {
 641       // This is now done here because of the piece-meal mangling which
 642       // can check for valid mangling at intermediate points in the
 643       // collection(s).  When a minor collection fails to collect
 644       // sufficient space, resizing of the young generation can occur
 645       // and redistribute the spaces in the young generation.  Mangle
 646       // here so that unzapped regions don't get distributed to
 647       // other spaces.
 648       to()->mangle_unused_area();
 649     }
 650     swap_spaces();
 651 
 652     assert(to()->is_empty(), "to space should be empty now");
 653 
 654     adjust_desired_tenuring_threshold();
 655 
 656     // A successful scavenge should restart the GC time limit count which is


 663     assert(!gch->incremental_collection_failed(), "Should be clear");
 664   } else {
 665     assert(_promo_failure_scan_stack.is_empty(), "post condition");
 666     _promo_failure_scan_stack.clear(true); // Clear cached segments.
 667 
 668     remove_forwarding_pointers();
 669     if (PrintGCDetails) {
 670       gclog_or_tty->print(" (promotion failed) ");
 671     }
 672     // Add to-space to the list of spaces to compact
 673     // when a promotion failure has occurred.  In that
 674     // case there can be live objects in to-space
 675     // as a result of a partial evacuation of eden
 676     // and from-space.
 677     swap_spaces();   // For uniformity wrt ParNewGeneration.
 678     from()->set_next_compaction_space(to());
 679     gch->set_incremental_collection_failed();
 680 
 681     // Inform the next generation that a promotion failure occurred.
 682     _next_gen->promotion_failure_occurred();

 683 
 684     // Reset the PromotionFailureALot counters.
 685     NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
 686   }
 687   // set new iteration safe limit for the survivor spaces
 688   from()->set_concurrent_iteration_safe_limit(from()->top());
 689   to()->set_concurrent_iteration_safe_limit(to()->top());
 690   SpecializationStats::print();
 691 
 692   // We need to use a monotonically non-deccreasing time in ms
 693   // or we will see time-warp warnings and os::javaTimeMillis()
 694   // does not guarantee monotonicity.
 695   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
 696   update_time_of_last_gc(now);







 697 }
 698 
 699 class RemoveForwardPointerClosure: public ObjectClosure {
 700 public:
 701   void do_object(oop obj) {
 702     obj->init_mark();
 703   }
 704 };
 705 
 706 void DefNewGeneration::init_assuming_no_promotion_failure() {
 707   _promotion_failed = false;

 708   from()->set_next_compaction_space(NULL);
 709 }
 710 
 711 void DefNewGeneration::remove_forwarding_pointers() {
 712   RemoveForwardPointerClosure rspc;
 713   eden()->object_iterate(&rspc);
 714   from()->object_iterate(&rspc);
 715 
 716   // Now restore saved marks, if any.
 717   assert(_objs_with_preserved_marks.size() == _preserved_marks_of_objs.size(),
 718          "should be the same");
 719   while (!_objs_with_preserved_marks.is_empty()) {
 720     oop obj   = _objs_with_preserved_marks.pop();
 721     markOop m = _preserved_marks_of_objs.pop();
 722     obj->set_mark(m);
 723   }
 724   _objs_with_preserved_marks.clear(true);
 725   _preserved_marks_of_objs.clear(true);
 726 }
 727 
 728 void DefNewGeneration::preserve_mark(oop obj, markOop m) {
 729   assert(promotion_failed() && m->must_be_preserved_for_promotion_failure(obj),
 730          "Oversaving!");
 731   _objs_with_preserved_marks.push(obj);
 732   _preserved_marks_of_objs.push(m);
 733 }
 734 
 735 void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
 736   if (m->must_be_preserved_for_promotion_failure(obj)) {
 737     preserve_mark(obj, m);
 738   }
 739 }
 740 
 741 void DefNewGeneration::handle_promotion_failure(oop old) {
 742   if (PrintPromotionFailure && !_promotion_failed) {
 743     gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ",
 744                         old->size());
 745   }
 746   _promotion_failed = true;

 747   preserve_mark_if_necessary(old, old->mark());
 748   // forward to self
 749   old->forward_to(old);
 750 
 751   _promo_failure_scan_stack.push(old);
 752 
 753   if (!_promo_failure_drain_in_progress) {
 754     // prevent recursion in copy_to_survivor_space()
 755     _promo_failure_drain_in_progress = true;
 756     drain_promo_failure_scan_stack();
 757     _promo_failure_drain_in_progress = false;
 758   }
 759 }
 760 
 761 oop DefNewGeneration::copy_to_survivor_space(oop old) {
 762   assert(is_in_reserved(old) && !old->is_forwarded(),
 763          "shouldn't be scavenging this oop");
 764   size_t s = old->size();
 765   oop obj = NULL;
 766 


 945     from()->check_mangled_unused_area_complete();
 946     to()->check_mangled_unused_area_complete();
 947   }
 948 
 949   if (!CleanChunkPoolAsync) {
 950     Chunk::clean_chunk_pool();
 951   }
 952 
 953   // update the generation and space performance counters
 954   update_counters();
 955   gch->collector_policy()->counters()->update_counters();
 956 }
 957 
 958 void DefNewGeneration::record_spaces_top() {
 959   assert(ZapUnusedHeapArea, "Not mangling unused space");
 960   eden()->set_top_for_allocations();
 961   to()->set_top_for_allocations();
 962   from()->set_top_for_allocations();
 963 }
 964 




 965 
 966 void DefNewGeneration::update_counters() {
 967   if (UsePerfData) {
 968     _eden_counters->update_all();
 969     _from_counters->update_all();
 970     _to_counters->update_all();
 971     _gen_counters->update_all();
 972   }
 973 }
 974 
 975 void DefNewGeneration::verify() {
 976   eden()->verify();
 977   from()->verify();
 978   to()->verify();
 979 }
 980 
 981 void DefNewGeneration::print_on(outputStream* st) const {
 982   Generation::print_on(st);
 983   st->print("  eden");
 984   eden()->print_on(st);


   1 /*
   2  * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/shared/collectorCounters.hpp"
  27 #include "gc_implementation/shared/gcPolicyCounters.hpp"
  28 #include "gc_implementation/shared/gcHeapSummary.hpp"
  29 #include "gc_implementation/shared/gcTimer.hpp"
  30 #include "gc_implementation/shared/gcTraceTime.hpp"
  31 #include "gc_implementation/shared/gcTrace.hpp"
  32 #include "gc_implementation/shared/spaceDecorator.hpp"
  33 #include "memory/defNewGeneration.inline.hpp"
  34 #include "memory/gcLocker.inline.hpp"
  35 #include "memory/genCollectedHeap.hpp"
  36 #include "memory/genOopClosures.inline.hpp"
  37 #include "memory/genRemSet.hpp"
  38 #include "memory/generationSpec.hpp"
  39 #include "memory/iterator.hpp"
  40 #include "memory/referencePolicy.hpp"
  41 #include "memory/space.inline.hpp"
  42 #include "oops/instanceRefKlass.hpp"
  43 #include "oops/oop.inline.hpp"
  44 #include "runtime/java.hpp"
  45 #include "runtime/thread.inline.hpp"
  46 #include "utilities/copy.hpp"
  47 #include "utilities/stack.inline.hpp"
  48 
  49 //
  50 // DefNewGeneration functions.
  51 


 210   _max_eden_size = size - (2*_max_survivor_size);
 211 
 212   // allocate the performance counters
 213 
 214   // Generation counters -- generation 0, 3 subspaces
 215   _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
 216   _gc_counters = new CollectorCounters(policy, 0);
 217 
 218   _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
 219                                       _gen_counters);
 220   _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
 221                                       _gen_counters);
 222   _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
 223                                     _gen_counters);
 224 
 225   compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
 226   update_counters();
 227   _next_gen = NULL;
 228   _tenuring_threshold = MaxTenuringThreshold;
 229   _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
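       // (PretenureSizeThreshold is specified in bytes; shifting right by
       // LogHeapWordSize, 3 on 64-bit and 2 on 32-bit platforms, converts
       // it to a count of heap words.)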
 230 
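       // JEP 167: allocate one stop-the-world GC timer per generation, in
       // the C heap under the mtGC memory tag; collect() below reuses it
       // for every collection.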
 231   _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
 232 }
 233 
 234 void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
 235                                                 bool clear_space,
 236                                                 bool mangle_space) {
 237   uintx alignment =
 238     GenCollectedHeap::heap()->collector_policy()->min_alignment();
 239 
 240   // If the spaces are being cleared (only done at heap initialization
 241   // currently), the survivor spaces need not be empty.
 242   // Otherwise, used areas in the survivor spaces are not dealt with,
 243   // so check that they are empty.
 244   assert(clear_space || (to()->is_empty() && from()->is_empty()),
 245     "Initialization of the survivor spaces assumes these are empty");
 246 
 247   // Compute sizes
 248   uintx size = _virtual_space.committed_size();
 249   uintx survivor_size = compute_survivor_size(size, alignment);
 250   uintx eden_size = size - (2*survivor_size);
 251   assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
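       // (With the default SurvivorRatio of 8, compute_survivor_size gives
       // each survivor space roughly a tenth of the committed size, so eden
       // gets roughly eight tenths; alignment may round these figures.)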


 547 }
 548 
 549 HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
 550                                                 bool   is_tlab,
 551                                                 bool   parallel) {
 552   // We don't attempt to expand the young generation (but perhaps we should.)
 553   return allocate(size, is_tlab);
 554 }
 555 
 556 void DefNewGeneration::adjust_desired_tenuring_threshold() {
 557   // Set the desired survivor size to half the real survivor space
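       // (The age table returns the smallest age at which the cumulative
       // size of surviving objects exceeds the desired survivor size,
       // TargetSurvivorRatio percent of to-space capacity; the default
       // ratio of 50 is where "half" comes from.)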
 558   _tenuring_threshold =
 559     age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
 560 }
 561 
 562 void DefNewGeneration::collect(bool   full,
 563                                bool   clear_all_soft_refs,
 564                                size_t size,
 565                                bool   is_tlab) {
 566   assert(full || size > 0, "otherwise we don't want to collect");
 567 
 568   GenCollectedHeap* gch = GenCollectedHeap::heap();
 569 
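       // JEP 167 additions: bracket the collection with event-tracing
       // start calls; the matching end calls appear at the bottom of this
       // method.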
 570   _gc_timer->register_gc_start(os::elapsed_counter());
 571   DefNewTracer gc_tracer;
 572   gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
 573 
 574   _next_gen = gch->next_gen(this);
 575   assert(_next_gen != NULL,
 576     "This must be the youngest gen, and not the only gen");
 577 
 578   // If the next generation is too full to accommodate promotion
 579   // from this generation, pass on collection; let the next generation
 580   // do it.
 581   if (!collection_attempt_is_safe()) {
 582     if (Verbose && PrintGCDetails) {
 583       gclog_or_tty->print(" :: Collection attempt not safe :: ");
 584     }
 585     gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
 586     return;
 587   }
 588   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
 589 
 590   init_assuming_no_promotion_failure();
 591 
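       // GCTraceTime replaces the TraceTime used previously and can also
       // record its phase into a GCTimer; the trailing NULL means no timer
       // is attached to this particular scope.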
 592   GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
 593   // Capture heap used before collection (for printing).
 594   size_t gch_prev_used = gch->used();
 595 
 596   gch->trace_heap_before_gc(&gc_tracer);
 597 
 598   SpecializationStats::clear();
 599 
 600   // These can be shared for all code paths
 601   IsAliveClosure is_alive(this);
 602   ScanWeakRefClosure scan_weak_ref(this);
 603 
 604   age_table()->clear();
 605   to()->clear(SpaceDecorator::Mangle);
 606 
 607   gch->rem_set()->prepare_for_younger_refs_iterate(false);
 608 
 609   assert(gch->no_allocs_since_save_marks(0),
 610          "save marks have not been newly set.");
 611 
 612   // Not very pretty.
 613   CollectorPolicy* cp = gch->collector_policy();
 614 
 615   FastScanClosure fsc_with_no_gc_barrier(this, false);
 616   FastScanClosure fsc_with_gc_barrier(this, true);
 617 


 628 
 629   int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
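       // (These ScanningOption bits select which strong-root groups,
       // loaded classes, interned strings, and the code cache, take part
       // in the root scan below.)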
 630 
 631   gch->gen_process_strong_roots(_level,
 632                                 true,  // Process younger gens, if any,
 633                                        // as strong roots.
 634                                 true,  // activate StrongRootsScope
 635                                 true,  // is scavenging
 636                                 SharedHeap::ScanningOption(so),
 637                                 &fsc_with_no_gc_barrier,
 638                                 true,   // walk *all* scavengable nmethods
 639                                 &fsc_with_gc_barrier,
 640                                 &klass_scan_closure);
 641 
 642   // "evacuate followers".
 643   evacuate_followers.do_void();
 644 
 645   FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
 646   ReferenceProcessor* rp = ref_processor();
 647   rp->setup_policy(clear_all_soft_refs);
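       // JEP 167: reference processing now returns statistics (counts of
       // soft, weak, final, and phantom references) that are forwarded to
       // the event tracer below.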
 648   const ReferenceProcessorStats& stats =
 649   rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
 650                                     NULL, _gc_timer);
 651   gc_tracer.report_gc_reference_stats(stats);
 652 
 653   if (!_promotion_failed) {
 654     // Swap the survivor spaces.
 655     eden()->clear(SpaceDecorator::Mangle);
 656     from()->clear(SpaceDecorator::Mangle);
 657     if (ZapUnusedHeapArea) {
 658       // This is now done here because of the piece-meal mangling which
 659       // can check for valid mangling at intermediate points in the
 660       // collection(s).  When a minor collection fails to collect
 661       // sufficient space, resizing of the young generation can occur
 662       // and redistribute the spaces in the young generation.  Mangle
 663       // here so that unzapped regions don't get distributed to
 664       // other spaces.
 665       to()->mangle_unused_area();
 666     }
 667     swap_spaces();
 668 
 669     assert(to()->is_empty(), "to space should be empty now");
 670 
 671     adjust_desired_tenuring_threshold();
 672 
 673     // A successful scavenge should restart the GC time limit count which is


 680     assert(!gch->incremental_collection_failed(), "Should be clear");
 681   } else {
 682     assert(_promo_failure_scan_stack.is_empty(), "post condition");
 683     _promo_failure_scan_stack.clear(true); // Clear cached segments.
 684 
 685     remove_forwarding_pointers();
 686     if (PrintGCDetails) {
 687       gclog_or_tty->print(" (promotion failed) ");
 688     }
 689     // Add to-space to the list of spaces to compact
 690     // when a promotion failure has occurred.  In that
 691     // case there can be live objects in to-space
 692     // as a result of a partial evacuation of eden
 693     // and from-space.
 694     swap_spaces();   // For uniformity wrt ParNewGeneration.
 695     from()->set_next_compaction_space(to());
 696     gch->set_incremental_collection_failed();
 697 
 698     // Inform the next generation that a promotion failure occurred.
 699     _next_gen->promotion_failure_occurred();
 700     gc_tracer.report_promotion_failed(_promotion_failed_info);
 701 
 702     // Reset the PromotionFailureALot counters.
 703     NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
 704   }
 705   // set new iteration safe limit for the survivor spaces
 706   from()->set_concurrent_iteration_safe_limit(from()->top());
 707   to()->set_concurrent_iteration_safe_limit(to()->top());
 708   SpecializationStats::print();
 709 
 710   // We need to use a monotonically non-decreasing time in ms
 711   // or we will see time-warp warnings and os::javaTimeMillis()
 712   // does not guarantee monotonicity.
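       // (os::javaTimeNanos() is backed by a monotonic clock where the
       // platform provides one; NANOSECS_PER_MILLISEC is 1000000, so the
       // division yields a monotonic millisecond timestamp.)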
 713   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
 714   update_time_of_last_gc(now);
 715 
 716   gch->trace_heap_after_gc(&gc_tracer);
 717   gc_tracer.report_tenuring_threshold(tenuring_threshold());
 718 
 719   _gc_timer->register_gc_end(os::elapsed_counter());
 720 
 721   gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
 722 }
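     // A minimal sketch (not part of the file) of the JEP 167 tracing
     // pattern that collect() above follows; names match this file's usage:
     //
     //   _gc_timer->register_gc_start(os::elapsed_counter());
     //   DefNewTracer gc_tracer;
     //   gc_tracer.report_gc_start(cause, _gc_timer->gc_start());
     //   /* ... perform the collection, reporting reference, promotion */
     //   /* and heap statistics to gc_tracer along the way ...         */
     //   _gc_timer->register_gc_end(os::elapsed_counter());
     //   gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());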
 723 
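     // Used after a promotion failure: objects in eden and from-space
     // may still be self-forwarded (see handle_promotion_failure below);
     // init_mark() restores the default mark word.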
 724 class RemoveForwardPointerClosure: public ObjectClosure {
 725 public:
 726   void do_object(oop obj) {
 727     obj->init_mark();
 728   }
 729 };
 730 
 731 void DefNewGeneration::init_assuming_no_promotion_failure() {
 732   _promotion_failed = false;
 733   _promotion_failed_info.reset();
 734   from()->set_next_compaction_space(NULL);
 735 }
 736 
 737 void DefNewGeneration::remove_forwarding_pointers() {
 738   RemoveForwardPointerClosure rspc;
 739   eden()->object_iterate(&rspc);
 740   from()->object_iterate(&rspc);
 741 
 742   // Now restore saved marks, if any.
 743   assert(_objs_with_preserved_marks.size() == _preserved_marks_of_objs.size(),
 744          "should be the same");
 745   while (!_objs_with_preserved_marks.is_empty()) {
 746     oop obj   = _objs_with_preserved_marks.pop();
 747     markOop m = _preserved_marks_of_objs.pop();
 748     obj->set_mark(m);
 749   }
 750   _objs_with_preserved_marks.clear(true);
 751   _preserved_marks_of_objs.clear(true);
 752 }
 753 
 754 void DefNewGeneration::preserve_mark(oop obj, markOop m) {
 755   assert(_promotion_failed && m->must_be_preserved_for_promotion_failure(obj),
 756          "Oversaving!");
 757   _objs_with_preserved_marks.push(obj);
 758   _preserved_marks_of_objs.push(m);
 759 }
 760 
 761 void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
 762   if (m->must_be_preserved_for_promotion_failure(obj)) {
 763     preserve_mark(obj, m);
 764   }
 765 }
 766 
 767 void DefNewGeneration::handle_promotion_failure(oop old) {
 768   if (PrintPromotionFailure && !_promotion_failed) {
 769     gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ",
 770                         old->size());
 771   }
 772   _promotion_failed = true;
 773   _promotion_failed_info.register_copy_failure(old->size());
 774   preserve_mark_if_necessary(old, old->mark());
 775   // forward to self
 776   old->forward_to(old);
 777 
 778   _promo_failure_scan_stack.push(old);
 779 
 780   if (!_promo_failure_drain_in_progress) {
 781     // prevent recursion in copy_to_survivor_space()
 782     _promo_failure_drain_in_progress = true;
 783     drain_promo_failure_scan_stack();
 784     _promo_failure_drain_in_progress = false;
 785   }
 786 }
 787 
 788 oop DefNewGeneration::copy_to_survivor_space(oop old) {
 789   assert(is_in_reserved(old) && !old->is_forwarded(),
 790          "shouldn't be scavenging this oop");
 791   size_t s = old->size();
 792   oop obj = NULL;
 793 


 972     from()->check_mangled_unused_area_complete();
 973     to()->check_mangled_unused_area_complete();
 974   }
 975 
 976   if (!CleanChunkPoolAsync) {
 977     Chunk::clean_chunk_pool();
 978   }
 979 
 980   // update the generation and space performance counters
 981   update_counters();
 982   gch->collector_policy()->counters()->update_counters();
 983 }
 984 
 985 void DefNewGeneration::record_spaces_top() {
 986   assert(ZapUnusedHeapArea, "Not mangling unused space");
 987   eden()->set_top_for_allocations();
 988   to()->set_top_for_allocations();
 989   from()->set_top_for_allocations();
 990 }
 991 
 992 void DefNewGeneration::ref_processor_init() {
 993   Generation::ref_processor_init();
 994 }
 995 
 996 
 997 void DefNewGeneration::update_counters() {
 998   if (UsePerfData) {
 999     _eden_counters->update_all();
1000     _from_counters->update_all();
1001     _to_counters->update_all();
1002     _gen_counters->update_all();
1003   }
1004 }
1005 
1006 void DefNewGeneration::verify() {
1007   eden()->verify();
1008   from()->verify();
1009   to()->verify();
1010 }
1011 
1012 void DefNewGeneration::print_on(outputStream* st) const {
1013   Generation::print_on(st);
1014   st->print("  eden");
1015   eden()->print_on(st);