3673 void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
3674 if (!G1Log::fine()) {
3675 return;
3676 }
3677
3678 if (G1Log::finer()) {
3679 if (evacuation_failed()) {
3680 gclog_or_tty->print(" (to-space exhausted)");
3681 }
3682 gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
3683 g1_policy()->phase_times()->note_gc_end();
3684 g1_policy()->phase_times()->print(pause_time_sec);
3685 g1_policy()->print_detailed_heap_transition();
3686 } else {
3687 if (evacuation_failed()) {
3688 gclog_or_tty->print("--");
3689 }
3690 g1_policy()->print_heap_transition();
3691 gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
3692 }
3693 }
3694
3695 bool
3696 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
// Performs a G1 collection pause at a safepoint.  Returns false when the
// pause cannot be started because the GC_locker is active; other return
// paths are not visible in this capture of the file.  Must run in the VM
// thread at a safepoint (asserted below) and is not reentrant.
3697 assert_at_safepoint(true /* should_be_vm_thread */);
3698 guarantee(!is_gc_active(), "collection is not reentrant");
3699
// Give up -- the caller is expected to retry -- if a thread currently
// holds the GC_locker (JNI critical region).
3700 if (GC_locker::check_active_before_gc()) {
3701 return false;
3702 }
3703
// Scope objects: records this pause as a MINOR GC for SvcGCMarker, and
// establishes a ResourceMark for resource-area allocations in this pause.
3704 SvcGCMarker sgcm(SvcGCMarker::MINOR);
3705 ResourceMark rm;
3706
3707 print_heap_before_gc();
3708
// Optional (conditional) pre-pause verification of region sets and
// young-region dirty state.
3709 HRSPhaseSetter x(HRSPhaseEvacuation);
3710 verify_region_sets_optional();
3711 verify_dirty_young_regions();
3712
// NOTE(review): the original file's lines between here and the next
// statement are not present in this capture -- the bulk of the pause
// logic is elided, including the code that opens the scope(s) closed by
// the braces below.
4019 ref_processor_stw()->verify_no_references_recorded();
4020
4021 // CM reference discovery will be re-enabled if necessary.
4022 }
4023
4024 // We should do this after we potentially expand the heap so
4025 // that all the COMMIT events are generated before the end GC
4026 // event, and after we retire the GC alloc regions so that all
4027 // RETIRE events are generated before the end GC event.
4028 _hr_printer.end_gc(false /* full */, (size_t) total_collections());
4029
// Keep concurrent mark's notion of the committed heap up to date when a
// marking cycle is in progress.
4030 if (mark_in_progress()) {
4031 concurrent_mark()->update_g1_committed();
4032 }
4033
4034 #ifdef TRACESPINNING
4035 ParallelTaskTerminator::print_termination_counts();
4036 #endif
4037
4038 gc_epilogue(false);
4039
// Emit the GC log footer using elapsed wall time.  pause_start_sec is
// assigned in the elided portion of this function -- TODO confirm.  Note
// this call sits inside an inner scope here; the second copy of this
// function later in the file moves it after that scope's closing brace.
4040 log_gc_footer(os::elapsedTime() - pause_start_sec);
4041 }
4042
4043 // It is not yet safe to tell the concurrent mark to
4044 // start as we have some optional output below. We don't want the
4045 // output from the concurrent mark thread interfering with this
4046 // logging output either.
4047
4048 _hrs.verify_optional();
4049 verify_region_sets_optional();
4050
// TASKQUEUE_STATS_ONLY presumably compiles these statements out unless
// task-queue statistics are enabled -- verify the macro's definition.
4051 TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
4052 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
4053
4054 print_heap_after_gc();
4055
4056 // We must call G1MonitoringSupport::update_sizes() in the same scoping level
4057 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
4058 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
4059 // before any GC notifications are raised.
4060 g1mm()->update_sizes();
// NOTE(review): no return statement is visible on this path even though the
// function is declared bool; the result is presumably produced in code not
// shown in this capture -- confirm against the full file.
4061 }
|
3673 void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
3674 if (!G1Log::fine()) {
3675 return;
3676 }
3677
3678 if (G1Log::finer()) {
3679 if (evacuation_failed()) {
3680 gclog_or_tty->print(" (to-space exhausted)");
3681 }
3682 gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
3683 g1_policy()->phase_times()->note_gc_end();
3684 g1_policy()->phase_times()->print(pause_time_sec);
3685 g1_policy()->print_detailed_heap_transition();
3686 } else {
3687 if (evacuation_failed()) {
3688 gclog_or_tty->print("--");
3689 }
3690 g1_policy()->print_heap_transition();
3691 gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
3692 }
3693 gclog_or_tty->flush();
3694 }
3695
3696 bool
3697 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
// Performs a G1 collection pause at a safepoint.  Returns false when the
// pause cannot be started because the GC_locker is active; other return
// paths are not visible in this capture of the file.  Must run in the VM
// thread at a safepoint (asserted below) and is not reentrant.
3698 assert_at_safepoint(true /* should_be_vm_thread */);
3699 guarantee(!is_gc_active(), "collection is not reentrant");
3700
// Give up -- the caller is expected to retry -- if a thread currently
// holds the GC_locker (JNI critical region).
3701 if (GC_locker::check_active_before_gc()) {
3702 return false;
3703 }
3704
// Scope objects: records this pause as a MINOR GC for SvcGCMarker, and
// establishes a ResourceMark for resource-area allocations in this pause.
3705 SvcGCMarker sgcm(SvcGCMarker::MINOR);
3706 ResourceMark rm;
3707
3708 print_heap_before_gc();
3709
// Optional (conditional) pre-pause verification of region sets and
// young-region dirty state.
3710 HRSPhaseSetter x(HRSPhaseEvacuation);
3711 verify_region_sets_optional();
3712 verify_dirty_young_regions();
3713
// NOTE(review): the original file's lines between here and the next
// statement are not present in this capture -- the bulk of the pause
// logic is elided, including the code that opens the scope(s) closed by
// the braces below.
4020 ref_processor_stw()->verify_no_references_recorded();
4021
4022 // CM reference discovery will be re-enabled if necessary.
4023 }
4024
4025 // We should do this after we potentially expand the heap so
4026 // that all the COMMIT events are generated before the end GC
4027 // event, and after we retire the GC alloc regions so that all
4028 // RETIRE events are generated before the end GC event.
4029 _hr_printer.end_gc(false /* full */, (size_t) total_collections());
4030
// Keep concurrent mark's notion of the committed heap up to date when a
// marking cycle is in progress.
4031 if (mark_in_progress()) {
4032 concurrent_mark()->update_g1_committed();
4033 }
4034
4035 #ifdef TRACESPINNING
4036 ParallelTaskTerminator::print_termination_counts();
4037 #endif
4038
4039 gc_epilogue(false);
4040 }
4041
4042 // Print the remainder of the GC log output.
// The footer is emitted after the inner scope above has closed (unlike the
// earlier copy of this function in the file, which calls it inside that
// scope).  pause_start_sec is assigned in the elided portion of this
// function -- TODO confirm.
4043 log_gc_footer(os::elapsedTime() - pause_start_sec);
4044
4045 // It is not yet safe to tell the concurrent mark to
4046 // start as we have some optional output below. We don't want the
4047 // output from the concurrent mark thread interfering with this
4048 // logging output either.
4049
4050 _hrs.verify_optional();
4051 verify_region_sets_optional();
4052
// TASKQUEUE_STATS_ONLY presumably compiles these statements out unless
// task-queue statistics are enabled -- verify the macro's definition.
4053 TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
4054 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
4055
4056 print_heap_after_gc();
4057
4058 // We must call G1MonitoringSupport::update_sizes() in the same scoping level
4059 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
4060 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
4061 // before any GC notifications are raised.
4062 g1mm()->update_sizes();
// NOTE(review): no return statement is visible on this path even though the
// function is declared bool; the result is presumably produced in code not
// shown in this capture -- confirm against the full file.
4063 }
|