src/hotspot/share/gc/cms/parNewGeneration.cpp

 982   // Can the mt_degree be set later (at run_task() time would be best)?
 983   rp->set_active_mt_degree(active_workers);
 984   ReferenceProcessorStats stats;
 985   ReferenceProcessorPhaseTimes pt(_gc_timer, rp->num_q());
 986   if (rp->processing_is_mt()) {
 987     ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
 988     stats = rp->process_discovered_references(&is_alive, &keep_alive,
 989                                               &evacuate_followers, &task_executor,
 990                                               &pt);
 991   } else {
 992     thread_state_set.flush();
 993     gch->save_marks();
 994     stats = rp->process_discovered_references(&is_alive, &keep_alive,
 995                                               &evacuate_followers, NULL,
 996                                               &pt);
 997   }
 998   _gc_tracer.report_gc_reference_stats(stats);
 999   _gc_tracer.report_tenuring_threshold(tenuring_threshold());
1000   pt.print_all_references();
1001 
1002   WeakProcessor::unlink_or_oops_do(&is_alive, &keep_alive);
1003 
1004   if (!promotion_failed()) {
1005     // Swap the survivor spaces.
1006     eden()->clear(SpaceDecorator::Mangle);
1007     from()->clear(SpaceDecorator::Mangle);
1008     if (ZapUnusedHeapArea) {
1009       // This is now done here because of the piece-meal mangling which
1010       // can check for valid mangling at intermediate points in the
1011       // collection(s).  When a young collection fails to collect
1012       // sufficient space resizing of the young generation can occur
1013       // and redistribute the spaces in the young generation.  Mangle
1014       // here so that unzapped regions don't get distributed to
1015       // other spaces.
1016       to()->mangle_unused_area();
1017     }
1018     swap_spaces();
1019 
1020     // A successful scavenge should restart the GC time limit count which is
1021     // for full GC's.
1022     size_policy->reset_gc_overhead_limit_count();




 982   // Can the mt_degree be set later (at run_task() time would be best)?
 983   rp->set_active_mt_degree(active_workers);
 984   ReferenceProcessorStats stats;
 985   ReferenceProcessorPhaseTimes pt(_gc_timer, rp->num_q());
 986   if (rp->processing_is_mt()) {
 987     ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
 988     stats = rp->process_discovered_references(&is_alive, &keep_alive,
 989                                               &evacuate_followers, &task_executor,
 990                                               &pt);
 991   } else {
 992     thread_state_set.flush();
 993     gch->save_marks();
 994     stats = rp->process_discovered_references(&is_alive, &keep_alive,
 995                                               &evacuate_followers, NULL,
 996                                               &pt);
 997   }
 998   _gc_tracer.report_gc_reference_stats(stats);
 999   _gc_tracer.report_tenuring_threshold(tenuring_threshold());
1000   pt.print_all_references();
1001 
1002   WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
1003 
1004   if (!promotion_failed()) {
1005     // Swap the survivor spaces.
1006     eden()->clear(SpaceDecorator::Mangle);
1007     from()->clear(SpaceDecorator::Mangle);
1008     if (ZapUnusedHeapArea) {
1009       // This is now done here because of the piece-meal mangling which
1010       // can check for valid mangling at intermediate points in the
1011       // collection(s).  When a young collection fails to collect
1012       // sufficient space resizing of the young generation can occur
1013       // and redistribute the spaces in the young generation.  Mangle
1014       // here so that unzapped regions don't get distributed to
1015       // other spaces.
1016       to()->mangle_unused_area();
1017     }
1018     swap_spaces();
1019 
1020     // A successful scavenge should restart the GC time limit count which is
1021     // for full GC's.
1022     size_policy->reset_gc_overhead_limit_count();

