
src/hotspot/share/gc/serial/defNewGeneration.cpp



--- old/src/hotspot/share/gc/serial/defNewGeneration.cpp

 642     gch->young_process_roots(&srs,
 643                              &fsc_with_no_gc_barrier,
 644                              &fsc_with_gc_barrier,
 645                              &cld_scan_closure);
 646   }
 647 
 648   // "evacuate followers".
 649   evacuate_followers.do_void();
 650 
 651   FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
 652   ReferenceProcessor* rp = ref_processor();
 653   rp->setup_policy(clear_all_soft_refs);
 654   ReferenceProcessorPhaseTimes pt(_gc_timer, rp->num_q());
 655   const ReferenceProcessorStats& stats =
 656     rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
 657                                       NULL, &pt);
 658   gc_tracer.report_gc_reference_stats(stats);
 659   gc_tracer.report_tenuring_threshold(tenuring_threshold());
 660   pt.print_all_references();
 661 
 662   WeakProcessor::unlink_or_oops_do(&is_alive, &keep_alive);
 663 
 664   if (!_promotion_failed) {
 665     // Swap the survivor spaces.
 666     eden()->clear(SpaceDecorator::Mangle);
 667     from()->clear(SpaceDecorator::Mangle);
 668     if (ZapUnusedHeapArea) {
 669       // This is now done here because of the piece-meal mangling which
 670       // can check for valid mangling at intermediate points in the
 671       // collection(s).  When a young collection fails to collect
 672       // sufficient space, resizing of the young generation can occur
 673       // and redistribute the spaces in the young generation.  Mangle
 674       // here so that unzapped regions don't get distributed to
 675       // other spaces.
 676       to()->mangle_unused_area();
 677     }
 678     swap_spaces();
 679 
 680     assert(to()->is_empty(), "to space should be empty now");
 681 
 682     adjust_desired_tenuring_threshold();


+++ new/src/hotspot/share/gc/serial/defNewGeneration.cpp

 642     gch->young_process_roots(&srs,
 643                              &fsc_with_no_gc_barrier,
 644                              &fsc_with_gc_barrier,
 645                              &cld_scan_closure);
 646   }
 647 
 648   // "evacuate followers".
 649   evacuate_followers.do_void();
 650 
 651   FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
 652   ReferenceProcessor* rp = ref_processor();
 653   rp->setup_policy(clear_all_soft_refs);
 654   ReferenceProcessorPhaseTimes pt(_gc_timer, rp->num_q());
 655   const ReferenceProcessorStats& stats =
 656     rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
 657                                       NULL, &pt);
 658   gc_tracer.report_gc_reference_stats(stats);
 659   gc_tracer.report_tenuring_threshold(tenuring_threshold());
 660   pt.print_all_references();
 661 
 662   WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
 663 
 664   if (!_promotion_failed) {
 665     // Swap the survivor spaces.
 666     eden()->clear(SpaceDecorator::Mangle);
 667     from()->clear(SpaceDecorator::Mangle);
 668     if (ZapUnusedHeapArea) {
 669       // This is now done here because of the piece-meal mangling which
 670       // can check for valid mangling at intermediate points in the
 671       // collection(s).  When a young collection fails to collect
 672       // sufficient space, resizing of the young generation can occur
 673       // and redistribute the spaces in the young generation.  Mangle
 674       // here so that unzapped regions don't get distributed to
 675       // other spaces.
 676       to()->mangle_unused_area();
 677     }
 678     swap_spaces();
 679 
 680     assert(to()->is_empty(), "to space should be empty now");
 681 
 682     adjust_desired_tenuring_threshold();
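
The only functional change in this hunk is at line 662: WeakProcessor::unlink_or_oops_do(&is_alive, &keep_alive) becomes WeakProcessor::weak_oops_do(&is_alive, &keep_alive), with the same closure pair; the surrounding reference processing and space swapping are untouched. Below is a minimal standalone sketch of the closure protocol such a call drives over each weak root. The types Obj, IsAliveClosure, and KeepAliveClosure and the free function weak_oops_do are hypothetical stand-ins for HotSpot's oop, BoolObjectClosure, and OopClosure, not the real implementation.

// Hypothetical, simplified model of the is_alive / keep_alive closure pair.
// HotSpot's real BoolObjectClosure and OopClosure carry GC-specific state;
// these stand-ins only model "is the referent marked" and "forward the slot".
#include <iostream>
#include <vector>

struct Obj {
  bool marked;     // set by the marking/copying phase for live objects
  Obj* forwardee;  // new address if the object was evacuated, else nullptr
};

struct IsAliveClosure {
  bool do_object_b(const Obj* o) const { return o != nullptr && o->marked; }
};

struct KeepAliveClosure {
  // Update a root slot to point at the live object's new location.
  void do_oop(Obj** slot) const {
    if (*slot != nullptr && (*slot)->forwardee != nullptr) {
      *slot = (*slot)->forwardee;
    }
  }
};

// Sketch of what a weak_oops_do-style traversal does with the two closures:
// dead referents are cleared in place, live referents are kept alive (their
// slots forwarded). How entries get unlinked from their owning table is left
// out of this model -- an assumption, not a statement about HotSpot.
void weak_oops_do(std::vector<Obj*>& weak_slots,
                  const IsAliveClosure& is_alive,
                  const KeepAliveClosure& keep_alive) {
  for (Obj*& slot : weak_slots) {
    if (!is_alive.do_object_b(slot)) {
      slot = nullptr;            // clear the dead weak reference
    } else {
      keep_alive.do_oop(&slot);  // forward the live weak reference
    }
  }
}

int main() {
  Obj live{true, nullptr};
  Obj to_space_copy{true, nullptr};
  Obj moved{true, &to_space_copy};
  Obj dead{false, nullptr};
  std::vector<Obj*> slots{&live, &moved, &dead};
  weak_oops_do(slots, IsAliveClosure{}, KeepAliveClosure{});
  // Expected: live stays, moved is forwarded, dead is cleared.
  std::cout << (slots[0] == &live)
            << (slots[1] == &to_space_copy)
            << (slots[2] == nullptr) << '\n';  // prints 111
}

In the file itself the same closure pair does double duty: keep_alive (the FastKeepAliveClosure built at line 651) and is_alive feed both rp->process_discovered_references() at line 656 and the WeakProcessor call at line 662.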

