
src/share/vm/gc/g1/g1CollectedHeap.cpp


old version:

3599 
3600   TaskQueueStats totals;
3601   const uint n = num_task_queues();
3602   for (uint i = 0; i < n; ++i) {
3603     st->print("%3u ", i); task_queue(i)->stats.print(st); st->cr();
3604     totals += task_queue(i)->stats;
3605   }
3606   st->print_raw("tot "); totals.print(st); st->cr();
3607 
3608   DEBUG_ONLY(totals.verify());
3609 }
3610 
3611 void G1CollectedHeap::reset_taskqueue_stats() {
3612   const uint n = num_task_queues();
3613   for (uint i = 0; i < n; ++i) {
3614     task_queue(i)->stats.reset();
3615   }
3616 }
3617 #endif // TASKQUEUE_STATS
3618 
3619 void G1CollectedHeap::log_gc_footer(double pause_time_counter) {
3620   if (evacuation_failed()) {
3621     log_info(gc)("To-space exhausted");
3622   }
3623 
3624   double pause_time_sec = TimeHelper::counter_to_seconds(pause_time_counter);
3625   g1_policy()->print_phases(pause_time_sec);
3626 
3627   g1_policy()->print_detailed_heap_transition();
3628 }
3629 
3630 
3631 void G1CollectedHeap::wait_for_root_region_scanning() {
3632   double scan_wait_start = os::elapsedTime();
3633   // We have to wait until the CM threads finish scanning the
3634   // root regions as it's the only way to ensure that all the
3635   // objects on them have been correctly scanned before we start
3636   // moving them during the GC.
3637   bool waited = _cm->root_regions()->wait_until_scan_finished();
3638   double wait_time_ms = 0.0;
3639   if (waited) {
3640     double scan_wait_end = os::elapsedTime();
3641     wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3642   }
3643   g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
3644 }
3645 


3698 
3699     _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
3700 
3701     GCTraceCPUTime tcpu;
3702 
3703     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
3704                                                                   workers()->active_workers(),
3705                                                                   Threads::number_of_non_daemon_threads());
3706     workers()->set_active_workers(active_workers);
3707     FormatBuffer<> gc_string("Pause ");
3708     if (collector_state()->during_initial_mark_pause()) {
3709       gc_string.append("Initial Mark");
3710     } else if (collector_state()->gcs_are_young()) {
3711       gc_string.append("Young");
3712     } else {
3713       gc_string.append("Mixed");
3714     }
3715     GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
3716 
3717     double pause_start_sec = os::elapsedTime();
3718     double pause_start_counter = os::elapsed_counter();
3719     g1_policy()->note_gc_start(active_workers);
3720 
3721     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3722     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3723 
3724     // If the secondary_free_list is not empty, append it to the
3725     // free_list. No need to wait for the cleanup operation to finish;
3726     // the region allocation code will check the secondary_free_list
3727     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3728     // set, skip this step so that the region allocation code has to
3729     // get entries from the secondary_free_list.
3730     if (!G1StressConcRegionFreeing) {
3731       append_secondary_free_list_if_not_empty_with_lock();
3732     }
3733 
3734     assert(check_young_list_well_formed(), "young list should be well formed");
3735 
3736     // Don't dynamically change the number of GC threads this early.  A value of
3737     // 0 is used to indicate serial work.  When parallel work is done,
3738     // it will be set.


new version:

3599 
3600   TaskQueueStats totals;
3601   const uint n = num_task_queues();
3602   for (uint i = 0; i < n; ++i) {
3603     st->print("%3u ", i); task_queue(i)->stats.print(st); st->cr();
3604     totals += task_queue(i)->stats;
3605   }
3606   st->print_raw("tot "); totals.print(st); st->cr();
3607 
3608   DEBUG_ONLY(totals.verify());
3609 }
3610 
3611 void G1CollectedHeap::reset_taskqueue_stats() {
3612   const uint n = num_task_queues();
3613   for (uint i = 0; i < n; ++i) {
3614     task_queue(i)->stats.reset();
3615   }
3616 }
3617 #endif // TASKQUEUE_STATS
3618 
3619 void G1CollectedHeap::log_gc_footer(jlong pause_time_counter) {
3620   if (evacuation_failed()) {
3621     log_info(gc)("To-space exhausted");
3622   }
3623 
3624   double pause_time_ms = TimeHelper::counter_to_millis(pause_time_counter);
3625   g1_policy()->print_phases(pause_time_ms);
3626 
3627   g1_policy()->print_detailed_heap_transition();
3628 }
3629 
3630 
3631 void G1CollectedHeap::wait_for_root_region_scanning() {
3632   double scan_wait_start = os::elapsedTime();
3633   // We have to wait until the CM threads finish scanning the
3634   // root regions as it's the only way to ensure that all the
3635   // objects on them have been correctly scanned before we start
3636   // moving them during the GC.
3637   bool waited = _cm->root_regions()->wait_until_scan_finished();
3638   double wait_time_ms = 0.0;
3639   if (waited) {
3640     double scan_wait_end = os::elapsedTime();
3641     wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3642   }
3643   g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
3644 }
3645 


3698 
3699     _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
3700 
3701     GCTraceCPUTime tcpu;
3702 
3703     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
3704                                                                   workers()->active_workers(),
3705                                                                   Threads::number_of_non_daemon_threads());
3706     workers()->set_active_workers(active_workers);
3707     FormatBuffer<> gc_string("Pause ");
3708     if (collector_state()->during_initial_mark_pause()) {
3709       gc_string.append("Initial Mark");
3710     } else if (collector_state()->gcs_are_young()) {
3711       gc_string.append("Young");
3712     } else {
3713       gc_string.append("Mixed");
3714     }
3715     GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
3716 
3717     double pause_start_sec = os::elapsedTime();
3718     jlong pause_start_counter = os::elapsed_counter();
3719     g1_policy()->note_gc_start(active_workers);
3720 
3721     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3722     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3723 
3724     // If the secondary_free_list is not empty, append it to the
3725     // free_list. No need to wait for the cleanup operation to finish;
3726     // the region allocation code will check the secondary_free_list
3727     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3728     // set, skip this step so that the region allocation code has to
3729     // get entries from the secondary_free_list.
3730     if (!G1StressConcRegionFreeing) {
3731       append_secondary_free_list_if_not_empty_with_lock();
3732     }
3733 
3734     assert(check_young_list_well_formed(), "young list should be well formed");
3735 
3736     // Don't dynamically change the number of GC threads this early.  A value of
3737     // 0 is used to indicate serial work.  When parallel work is done,
3738     // it will be set.
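
The substantive change in this hunk is the type of the pause-time counter handed to log_gc_footer(): the raw jlong from os::elapsed_counter() is now passed through and converted with TimeHelper::counter_to_millis(), where the old code carried the counter in a double and converted with TimeHelper::counter_to_seconds(). Below is a minimal standalone sketch of the tick-to-milliseconds arithmetic involved; the fixed nanosecond tick rate is an assumption for illustration (HotSpot queries the real rate via os::elapsed_frequency()), and this is not the actual TimeHelper implementation.

    #include <cstdint>
    #include <cstdio>

    typedef int64_t jlong;

    // Assumed tick rate for illustration only; HotSpot obtains the real
    // value from os::elapsed_frequency().
    static const jlong kAssumedTicksPerSecond = 1000000000;

    static double counter_to_seconds(jlong counter) {
      return (double)counter / (double)kAssumedTicksPerSecond;
    }

    static double counter_to_millis(jlong counter) {
      return counter_to_seconds(counter) * 1000.0;
    }

    int main() {
      jlong pause_start_counter = 0;        // stand-in for os::elapsed_counter() at pause start
      jlong pause_end_counter = 42000000;   // 42 ms later at the assumed tick rate
      printf("pause: %.1f ms\n", counter_to_millis(pause_end_counter - pause_start_counter));
      return 0;
    }

Keeping the counter as a jlong until the final conversion avoids storing integral tick counts in a double parameter and makes the unit of the value explicit at the call site.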

