src/share/vm/gc/g1/g1CollectedHeap.cpp

3152 
3153   TaskQueueStats totals;
3154   const uint n = num_task_queues();
3155   for (uint i = 0; i < n; ++i) {
3156     st->print("%3u ", i); task_queue(i)->stats.print(st); st->cr();
3157     totals += task_queue(i)->stats;
3158   }
3159   st->print_raw("tot "); totals.print(st); st->cr();
3160 
3161   DEBUG_ONLY(totals.verify());
3162 }
3163 
3164 void G1CollectedHeap::reset_taskqueue_stats() {
3165   const uint n = num_task_queues();
3166   for (uint i = 0; i < n; ++i) {
3167     task_queue(i)->stats.reset();
3168   }
3169 }
3170 #endif // TASKQUEUE_STATS
3171 
3172 void G1CollectedHeap::log_gc_footer(jlong pause_time_counter) {
3173   if (evacuation_failed()) {
3174     log_info(gc)("To-space exhausted");
3175   }
3176 
3177   double pause_time_ms = TimeHelper::counter_to_millis(pause_time_counter);
3178   g1_policy()->print_phases(pause_time_ms);
3179 
3180   g1_policy()->print_detailed_heap_transition();
3181 }
3182 
3183 
3184 void G1CollectedHeap::wait_for_root_region_scanning() {
3185   double scan_wait_start = os::elapsedTime();
3186   // We have to wait until the CM threads finish scanning the
3187   // root regions as it's the only way to ensure that all the
3188   // objects on them have been correctly scanned before we start
3189   // moving them during the GC.
3190   bool waited = _cm->root_regions()->wait_until_scan_finished();
3191   double wait_time_ms = 0.0;
3192   if (waited) {
3193     double scan_wait_end = os::elapsedTime();
3194     wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3195   }
3196   g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
3197 }
3198 


3250     }
3251 
3252     _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
3253 
3254     GCTraceCPUTime tcpu;
3255 
3256     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
3257                                                                   workers()->active_workers(),
3258                                                                   Threads::number_of_non_daemon_threads());
3259     workers()->set_active_workers(active_workers);
3260     FormatBuffer<> gc_string("Pause ");
3261     if (collector_state()->during_initial_mark_pause()) {
3262       gc_string.append("Initial Mark");
3263     } else if (collector_state()->gcs_are_young()) {
3264       gc_string.append("Young");
3265     } else {
3266       gc_string.append("Mixed");
3267     }
3268     GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
3269 
3270     jlong pause_start_counter = os::elapsed_counter();
3271     g1_policy()->note_gc_start(active_workers);
3272 
3273     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3274     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3275 
3276     // If the secondary_free_list is not empty, append it to the
3277     // free_list. No need to wait for the cleanup operation to finish;
3278     // the region allocation code will check the secondary_free_list
3279     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3280     // set, skip this step so that the region allocation code has to
3281     // get entries from the secondary_free_list.
3282     if (!G1StressConcRegionFreeing) {
3283       append_secondary_free_list_if_not_empty_with_lock();
3284     }
3285 
3286     assert(check_young_list_well_formed(), "young list should be well formed");
3287 
3288     // Don't dynamically change the number of GC threads this early.  A value of
3289     // 0 is used to indicate serial work.  When parallel work is done,
3290     // it will be set.


3512           heap_region_iterate(&v_cl);
3513         }
3514 
3515         _verifier->verify_after_gc();
3516         _verifier->check_bitmaps("GC End");
3517 
3518         assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
3519         ref_processor_stw()->verify_no_references_recorded();
3520 
3521         // CM reference discovery will be re-enabled if necessary.
3522       }
3523 
3524 #ifdef TRACESPINNING
3525       ParallelTaskTerminator::print_termination_counts();
3526 #endif
3527 
3528       gc_epilogue(false);
3529     }
3530 
3531     // Print the remainder of the GC log output.
3532     log_gc_footer(os::elapsed_counter() - pause_start_counter);
3533 
3534     // It is not yet safe to tell the concurrent mark to
3535     // start as we have some optional output below. We don't want the
3536     // output from the concurrent mark thread interfering with this
3537     // logging output either.
3538 
3539     _hrm.verify_optional();
3540     _verifier->verify_region_sets_optional();
3541 
3542     TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
3543     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
3544 
3545     print_heap_after_gc();
3546     trace_heap_after_gc(_gc_tracer_stw);
3547 
3548     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
3549     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
3550     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
3551     // before any GC notifications are raised.
3552     g1mm()->update_sizes();




3152 
3153   TaskQueueStats totals;
3154   const uint n = num_task_queues();
3155   for (uint i = 0; i < n; ++i) {
3156     st->print("%3u ", i); task_queue(i)->stats.print(st); st->cr();
3157     totals += task_queue(i)->stats;
3158   }
3159   st->print_raw("tot "); totals.print(st); st->cr();
3160 
3161   DEBUG_ONLY(totals.verify());
3162 }
3163 
3164 void G1CollectedHeap::reset_taskqueue_stats() {
3165   const uint n = num_task_queues();
3166   for (uint i = 0; i < n; ++i) {
3167     task_queue(i)->stats.reset();
3168   }
3169 }
3170 #endif // TASKQUEUE_STATS
3171 
3172 void G1CollectedHeap::log_gc_footer() {
3173   if (evacuation_failed()) {
3174     log_info(gc)("To-space exhausted");
3175   }
3176 
3177   g1_policy()->print_phases();

3178 
3179   g1_policy()->print_detailed_heap_transition();
3180 }
3181 
3182 
3183 void G1CollectedHeap::wait_for_root_region_scanning() {
3184   double scan_wait_start = os::elapsedTime();
3185   // We have to wait until the CM threads finish scanning the
3186   // root regions as it's the only way to ensure that all the
3187   // objects on them have been correctly scanned before we start
3188   // moving them during the GC.
3189   bool waited = _cm->root_regions()->wait_until_scan_finished();
3190   double wait_time_ms = 0.0;
3191   if (waited) {
3192     double scan_wait_end = os::elapsedTime();
3193     wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3194   }
3195   g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
3196 }
3197 


3249     }
3250 
3251     _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
3252 
3253     GCTraceCPUTime tcpu;
3254 
3255     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
3256                                                                   workers()->active_workers(),
3257                                                                   Threads::number_of_non_daemon_threads());
3258     workers()->set_active_workers(active_workers);
3259     FormatBuffer<> gc_string("Pause ");
3260     if (collector_state()->during_initial_mark_pause()) {
3261       gc_string.append("Initial Mark");
3262     } else if (collector_state()->gcs_are_young()) {
3263       gc_string.append("Young");
3264     } else {
3265       gc_string.append("Mixed");
3266     }
3267     GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
3268 

3269     g1_policy()->note_gc_start(active_workers);
3270 
3271     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3272     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3273 
3274     // If the secondary_free_list is not empty, append it to the
3275     // free_list. No need to wait for the cleanup operation to finish;
3276     // the region allocation code will check the secondary_free_list
3277     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3278     // set, skip this step so that the region allocation code has to
3279     // get entries from the secondary_free_list.
3280     if (!G1StressConcRegionFreeing) {
3281       append_secondary_free_list_if_not_empty_with_lock();
3282     }
3283 
3284     assert(check_young_list_well_formed(), "young list should be well formed");
3285 
3286     // Don't dynamically change the number of GC threads this early.  A value of
3287     // 0 is used to indicate serial work.  When parallel work is done,
3288     // it will be set.


3510           heap_region_iterate(&v_cl);
3511         }
3512 
3513         _verifier->verify_after_gc();
3514         _verifier->check_bitmaps("GC End");
3515 
3516         assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
3517         ref_processor_stw()->verify_no_references_recorded();
3518 
3519         // CM reference discovery will be re-enabled if necessary.
3520       }
3521 
3522 #ifdef TRACESPINNING
3523       ParallelTaskTerminator::print_termination_counts();
3524 #endif
3525 
3526       gc_epilogue(false);
3527     }
3528 
3529     // Print the remainder of the GC log output.
3530     log_gc_footer();
3531 
3532     // It is not yet safe to tell the concurrent mark to
3533     // start as we have some optional output below. We don't want the
3534     // output from the concurrent mark thread interfering with this
3535     // logging output either.
3536 
3537     _hrm.verify_optional();
3538     _verifier->verify_region_sets_optional();
3539 
3540     TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
3541     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
3542 
3543     print_heap_after_gc();
3544     trace_heap_after_gc(_gc_tracer_stw);
3545 
3546     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
3547     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
3548     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
3549     // before any GC notifications are raised.
3550     g1mm()->update_sizes();

