
src/share/vm/gc/g1/g1CollectedHeap.cpp


3975   bool waited = _cm->root_regions()->wait_until_scan_finished();
3976   double wait_time_ms = 0.0;
3977   if (waited) {
3978     double scan_wait_end = os::elapsedTime();
3979     wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3980   }
3981   g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
3982 }
3983 
3984 bool
3985 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3986   assert_at_safepoint(true /* should_be_vm_thread */);
3987   guarantee(!is_gc_active(), "collection is not reentrant");
3988 
3989   if (GC_locker::check_active_before_gc()) {
3990     return false;
3991   }
3992 
3993   _gc_timer_stw->register_gc_start();
3994 
3995   GCIdMark gc_id_mark;
3996   _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3997 
3998   SvcGCMarker sgcm(SvcGCMarker::MINOR);
3999   ResourceMark rm;
4000 
4001   wait_for_root_region_scanning();
4002 
4003   G1Log::update_level();
4004   print_heap_before_gc();
4005   trace_heap_before_gc(_gc_tracer_stw);
4006 
4007   verify_region_sets_optional();
4008   verify_dirty_young_regions();
4009 
4010   // This call will decide whether this pause is an initial-mark
4011   // pause. If it is, during_initial_mark_pause() will return true
4012   // for the duration of this pause.
4013   g1_policy()->decide_on_conc_mark_initiation();
4014 
4015   // We do not allow initial-mark to be piggy-backed on a mixed GC.
4016   assert(!collector_state()->during_initial_mark_pause() ||
4017           collector_state()->gcs_are_young(), "sanity");
4018 
4019   // We also do not allow mixed GCs during marking.
4020   assert(!collector_state()->mark_in_progress() || collector_state()->gcs_are_young(), "sanity");
4021 
4022   // Record whether this pause is an initial mark. When the current
4023   // thread has completed its logging output and it's safe to signal
4024   // the CM thread, the flag's value in the policy has been reset.
4025   bool should_start_conc_mark = collector_state()->during_initial_mark_pause();
4026 
4027   // Inner scope for scope based logging, timers, and stats collection
4028   {
4029     EvacuationInfo evacuation_info;
4030 
4031     if (collector_state()->during_initial_mark_pause()) {
4032       // We are about to start a marking cycle, so we increment the
4033       // full collection counter.
4034       increment_old_marking_cycles_started();
4035       register_concurrent_cycle_start(_gc_timer_stw->gc_start());
4036     }
4037 
4038     _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
4039 
4040     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
4041 
4042     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
4043                                                                   workers()->active_workers(),
4044                                                                   Threads::number_of_non_daemon_threads());
4045     workers()->set_active_workers(active_workers);


4329     TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) print_taskqueue_stats());
4330     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
4331 
4332     print_heap_after_gc();
4333     trace_heap_after_gc(_gc_tracer_stw);
4334 
4335     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
4336     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
4337     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
4338     // before any GC notifications are raised.
4339     g1mm()->update_sizes();
4340 
4341     _gc_tracer_stw->report_evacuation_info(&evacuation_info);
4342     _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
4343     _gc_timer_stw->register_gc_end();
4344     _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
4345   }
4346   // It should now be safe to tell the concurrent mark thread to start
4347   // without its logging output interfering with the logging output
4348   // that came from the pause.
4349 
4350   if (should_start_conc_mark) {
4351     // CAUTION: after the doConcurrentMark() call below,
4352     // the concurrent marking thread(s) could be running
4353     // concurrently with us. Make sure that anything after
4354     // this point does not assume that we are the only GC thread
4355     // running. Note: of course, the actual marking work will
4356     // not start until the safepoint itself is released in
4357     // SuspendibleThreadSet::desynchronize().
4358     doConcurrentMark();
4359   }
4360 
4361   return true;
4362 }
4363 
4364 void G1CollectedHeap::remove_self_forwarding_pointers() {
4365   double remove_self_forwards_start = os::elapsedTime();
4366 
4367   G1ParRemoveSelfForwardPtrsTask rsfp_task;
4368   workers()->run_task(&rsfp_task);
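
The substantive change in the second listing below is one of scoping: the old version above constructs GCIdMark at function scope, so doConcurrentMark() at the end of the pause still runs under the pause's GC id, while the new version wraps everything up to the concurrent-mark hand-off in an inner block and hoists should_start_conc_mark out of it. A minimal sketch of that RAII scoping pattern, using hypothetical stand-ins (ScopedGCIdMark, g_current_gc_id, log_msg) rather than HotSpot's actual GCIdMark/GCId API:

#include <cstdio>

// Sketch only, not HotSpot code: a stand-in for GCIdMark, the RAII guard
// that binds a "current GC id" to the enclosing scope and restores the
// previous id when the scope exits.
static int g_current_gc_id = -1;   // -1 plays the role of GCId::undefined()
static int g_next_gc_id    = 0;

class ScopedGCIdMark {
  int _previous;
 public:
  ScopedGCIdMark() : _previous(g_current_gc_id) { g_current_gc_id = g_next_gc_id++; }
  ~ScopedGCIdMark() { g_current_gc_id = _previous; }  // restore on scope exit
};

static void log_msg(const char* msg) {
  std::printf("GC(%d) %s\n", g_current_gc_id, msg);
}

int main() {
  bool should_start_conc_mark = false;   // hoisted out, as in the new version
  {
    ScopedGCIdMark gc_id_mark;           // the pause gets its own id here
    log_msg("pause start");
    should_start_conc_mark = true;       // decision recorded for use after the scope
    log_msg("pause end");
  }                                      // the pause's id is dropped here
  if (should_start_conc_mark) {
    // Anything logged from this point is no longer tagged with the pause's id,
    // mirroring why the new version calls doConcurrentMark() after the block.
    log_msg("signal concurrent mark");   // prints GC(-1)
  }
  return 0;
}

Running the sketch prints GC(0) for the pause lines and GC(-1) for the hand-off line, which is the separation the new scoping buys.
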




3975   bool waited = _cm->root_regions()->wait_until_scan_finished();
3976   double wait_time_ms = 0.0;
3977   if (waited) {
3978     double scan_wait_end = os::elapsedTime();
3979     wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3980   }
3981   g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
3982 }
3983 
3984 bool
3985 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3986   assert_at_safepoint(true /* should_be_vm_thread */);
3987   guarantee(!is_gc_active(), "collection is not reentrant");
3988 
3989   if (GC_locker::check_active_before_gc()) {
3990     return false;
3991   }
3992 
3993   _gc_timer_stw->register_gc_start();
3994 
3995 
3996   SvcGCMarker sgcm(SvcGCMarker::MINOR);
3997   ResourceMark rm;
3998 
3999   wait_for_root_region_scanning();
4000 
4001   bool should_start_conc_mark = false;
4002   {
4003     GCIdMark gc_id_mark;
4004     _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
4005 
4006     G1Log::update_level();
4007     print_heap_before_gc();
4008     trace_heap_before_gc(_gc_tracer_stw);
4009 
4010     verify_region_sets_optional();
4011     verify_dirty_young_regions();
4012 
4013     // This call will decide whether this pause is an initial-mark
4014     // pause. If it is, during_initial_mark_pause() will return true
4015     // for the duration of this pause.
4016     g1_policy()->decide_on_conc_mark_initiation();
4017 
4018     // We do not allow initial-mark to be piggy-backed on a mixed GC.
4019     assert(!collector_state()->during_initial_mark_pause() ||
4020            collector_state()->gcs_are_young(), "sanity");
4021 
4022     // We also do not allow mixed GCs during marking.
4023     assert(!collector_state()->mark_in_progress() || collector_state()->gcs_are_young(), "sanity");
4024 
4025     // Record whether this pause is an initial mark. When the current
4026     // thread has completed its logging output and it's safe to signal
4027     // the CM thread, the flag's value in the policy has been reset.
4028     should_start_conc_mark = collector_state()->during_initial_mark_pause();
4029 
4030     // Inner scope for scope based logging, timers, and stats collection
4031     {
4032       EvacuationInfo evacuation_info;
4033 
4034       if (collector_state()->during_initial_mark_pause()) {
4035         // We are about to start a marking cycle, so we increment the
4036         // full collection counter.
4037         increment_old_marking_cycles_started();
4038         register_concurrent_cycle_start(_gc_timer_stw->gc_start());
4039       }
4040 
4041       _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
4042 
4043       TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
4044 
4045       uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
4046                                                                     workers()->active_workers(),
4047                                                                     Threads::number_of_non_daemon_threads());
4048       workers()->set_active_workers(active_workers);


4332       TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) print_taskqueue_stats());
4333       TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
4334 
4335       print_heap_after_gc();
4336       trace_heap_after_gc(_gc_tracer_stw);
4337 
4338       // We must call G1MonitoringSupport::update_sizes() in the same scoping level
4339       // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
4340       // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
4341       // before any GC notifications are raised.
4342       g1mm()->update_sizes();
4343 
4344       _gc_tracer_stw->report_evacuation_info(&evacuation_info);
4345       _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
4346       _gc_timer_stw->register_gc_end();
4347       _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
4348     }
4349     // It should now be safe to tell the concurrent mark thread to start
4350     // without its logging output interfering with the logging output
4351     // that came from the pause.
4352   }
4353 
4354   if (should_start_conc_mark) {
4355     // CAUTION: after the doConcurrentMark() call below,
4356     // the concurrent marking thread(s) could be running
4357     // concurrently with us. Make sure that anything after
4358     // this point does not assume that we are the only GC thread
4359     // running. Note: of course, the actual marking work will
4360     // not start until the safepoint itself is released in
4361     // SuspendibleThreadSet::desynchronize().
4362     doConcurrentMark();
4363   }
4364 
4365   return true;
4366 }
4367 
4368 void G1CollectedHeap::remove_self_forwarding_pointers() {
4369   double remove_self_forwards_start = os::elapsedTime();
4370 
4371   G1ParRemoveSelfForwardPtrsTask rsfp_task;
4372   workers()->run_task(&rsfp_task);
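
The comment above g1mm()->update_sizes() in both versions encodes an ordering constraint: the sizes must be refreshed inside the same scope as the active TraceMemoryManagerStats object, because that object's destructor is what raises the GC notification. A minimal sketch of the constraint, with NotifyOnExit and update_pool_sizes as hypothetical stand-ins for the real classes:

#include <cstdio>

// Sketch only, not HotSpot code: NotifyOnExit stands in for
// TraceMemoryManagerStats, whose destructor raises the GC notification.
class NotifyOnExit {
 public:
  ~NotifyOnExit() { std::printf("GC notification raised\n"); }
};

static void update_pool_sizes() {   // stands in for g1mm()->update_sizes()
  std::printf("memory pool sizes updated\n");
}

int main() {
  {
    NotifyOnExit stats;     // constructed near the top of the pause scope
    // ... evacuation work ...
    update_pool_sizes();    // must run before ~NotifyOnExit(), i.e. in the
                            // same scope, so listeners observe fresh sizes
  }                         // destructor fires here, after the update
  return 0;
}

The update prints before the notification, so any observer reacting to the notification sees the already-updated pool sizes, which is exactly what the comment in the source requires.
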

