src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
rev 3618 : 7198130: G1: PrintReferenceGC output comes out of order
Summary: Move the first part of the GC logging, including timestamp, to the start of the GC
Reviewed-by: johnc
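
The old code (below) built and printed the entire "[GC pause ...]" line, timestamp included, at the end of the pause, so anything logged during the pause, such as PrintReferenceGC output, appeared above its own pause header. The fix splits the logging into log_gc_header(), called at the start of the pause, and log_gc_footer(), called at the end. A minimal standalone sketch of that ordering (illustrative names and formats, not the HotSpot code itself):

    #include <chrono>
    #include <cstdio>

    static double elapsed_sec() {
      using namespace std::chrono;
      static const auto t0 = steady_clock::now();
      return duration<double>(steady_clock::now() - t0).count();
    }

    // Printed *before* the pause does any work, so lines logged during the
    // pause can no longer appear above their own timestamped header.
    static void log_gc_header(const char* cause) {
      std::printf("%.3f: [GC pause (%s)", elapsed_sec(), cause);
    }

    // Printed at the end of the pause: closes the bracket and reports timing.
    static void log_gc_footer(double pause_time_sec) {
      std::printf(", %3.7f secs]\n", pause_time_sec);
    }

    int main() {
      double pause_start_sec = elapsed_sec();
      log_gc_header("young");
      // ... evacuation and reference processing run here; their logging now
      // follows the header instead of preceding it ...
      log_gc_footer(elapsed_sec() - pause_start_sec);
      return 0;
    }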
--- old version ---
3646 
3647   TaskQueueStats totals;
3648   const int n = workers() != NULL ? workers()->total_workers() : 1;
3649   for (int i = 0; i < n; ++i) {
3650     st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr();
3651     totals += task_queue(i)->stats;
3652   }
3653   st->print_raw("tot "); totals.print(st); st->cr();
3654 
3655   DEBUG_ONLY(totals.verify());
3656 }
3657 
3658 void G1CollectedHeap::reset_taskqueue_stats() {
3659   const int n = workers() != NULL ? workers()->total_workers() : 1;
3660   for (int i = 0; i < n; ++i) {
3661     task_queue(i)->stats.reset();
3662   }
3663 }
3664 #endif // TASKQUEUE_STATS
3665 
3666 bool
3667 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3668   assert_at_safepoint(true /* should_be_vm_thread */);
3669   guarantee(!is_gc_active(), "collection is not reentrant");
3670 
3671   if (GC_locker::check_active_before_gc()) {
3672     return false;
3673   }
3674 
3675   SvcGCMarker sgcm(SvcGCMarker::MINOR);
3676   ResourceMark rm;
3677 
3678   print_heap_before_gc();
3679 
3680   HRSPhaseSetter x(HRSPhaseEvacuation);
3681   verify_region_sets_optional();
3682   verify_dirty_young_regions();
3683 
3684   // This call will decide whether this pause is an initial-mark
3685   // pause. If it is, during_initial_mark_pause() will return true


3688 
3689   // We do not allow initial-mark to be piggy-backed on a mixed GC.
3690   assert(!g1_policy()->during_initial_mark_pause() ||
3691           g1_policy()->gcs_are_young(), "sanity");
3692 
3693   // We also do not allow mixed GCs during marking.
3694   assert(!mark_in_progress() || g1_policy()->gcs_are_young(), "sanity");
3695 
3696   // Record whether this pause is an initial mark. When the current
3697   // thread has completed its logging output and it's safe to signal
3698   // the CM thread, the flag's value in the policy has been reset.
3699   bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
3700 
3701   // Inner scope for scope based logging, timers, and stats collection
3702   {
3703     if (g1_policy()->during_initial_mark_pause()) {
3704       // We are about to start a marking cycle, so we increment the
3705       // full collection counter.
3706       increment_old_marking_cycles_started();
3707     }
3708     // if the log level "finer" is on, we'll print long statistics information
3709     // in the collector policy code, so let's not print this as the output
3710     // is messy if we do.
3711     gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
3712     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
3713 
3714     int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
3715                                 workers()->active_workers() : 1);
3716     double pause_start_sec = os::elapsedTime();
3717     g1_policy()->phase_times()->note_gc_start(active_workers);
3718     bool initial_mark_gc = g1_policy()->during_initial_mark_pause();
3719 
3720     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3721     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3722 
3723     // If the secondary_free_list is not empty, append it to the
3724     // free_list. No need to wait for the cleanup operation to finish;
3725     // the region allocation code will check the secondary_free_list
3726     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3727     // set, skip this step so that the region allocation code has to
3728     // get entries from the secondary_free_list.
3729     if (!G1StressConcRegionFreeing) {
3730       append_secondary_free_list_if_not_empty_with_lock();
3731     }
3732 
3733     assert(check_young_list_well_formed(),
3734       "young list should be well formed");
3735 
3736     // Don't dynamically change the number of GC threads this early.  A value of
3737     // 0 is used to indicate serial work.  When parallel work is done,
3738     // it will be set.


3995 
3996         // CM reference discovery will be re-enabled if necessary.
3997       }
3998 
3999       // We should do this after we potentially expand the heap so
4000       // that all the COMMIT events are generated before the end GC
4001       // event, and after we retire the GC alloc regions so that all
4002       // RETIRE events are generated before the end GC event.
4003       _hr_printer.end_gc(false /* full */, (size_t) total_collections());
4004 
4005       if (mark_in_progress()) {
4006         concurrent_mark()->update_g1_committed();
4007       }
4008 
4009 #ifdef TRACESPINNING
4010       ParallelTaskTerminator::print_termination_counts();
4011 #endif
4012 
4013       gc_epilogue(false);
4014 
4015       if (G1Log::fine()) {
4016         if (PrintGCTimeStamps) {
4017           gclog_or_tty->stamp();
4018           gclog_or_tty->print(": ");
4019         }
4020 
4021         GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
4022           .append(g1_policy()->gcs_are_young() ? " (young)" : " (mixed)")
4023           .append(initial_mark_gc ? " (initial-mark)" : "");
4024 
4025         double pause_time_sec = os::elapsedTime() - pause_start_sec;
4026 
4027         if (G1Log::finer()) {
4028           if (evacuation_failed()) {
4029             gc_cause_str.append(" (to-space exhausted)");
4030           }
4031           gclog_or_tty->print_cr("[%s, %3.7f secs]", (const char*)gc_cause_str, pause_time_sec);
4032           g1_policy()->phase_times()->note_gc_end();
4033           g1_policy()->phase_times()->print(pause_time_sec);
4034           g1_policy()->print_detailed_heap_transition();
4035         } else {
4036           if (evacuation_failed()) {
4037             gc_cause_str.append("--");
4038           }
4039           gclog_or_tty->print("[%s", (const char*)gc_cause_str);
4040           g1_policy()->print_heap_transition();
4041           gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
4042         }
4043       }
4044     }
4045 
4046     // It is not yet safe to tell the concurrent mark to
4047     // start as we have some optional output below. We don't want the
4048     // output from the concurrent mark thread interfering with this
4049     // logging output either.
4050 
4051     _hrs.verify_optional();
4052     verify_region_sets_optional();
4053 
4054     TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
4055     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
4056 
4057     print_heap_after_gc();
4058 
4059     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
4060     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
4061     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
4062     // before any GC notifications are raised.
4063     g1mm()->update_sizes();

--- new version ---
3646 
3647   TaskQueueStats totals;
3648   const int n = workers() != NULL ? workers()->total_workers() : 1;
3649   for (int i = 0; i < n; ++i) {
3650     st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr();
3651     totals += task_queue(i)->stats;
3652   }
3653   st->print_raw("tot "); totals.print(st); st->cr();
3654 
3655   DEBUG_ONLY(totals.verify());
3656 }
3657 
3658 void G1CollectedHeap::reset_taskqueue_stats() {
3659   const int n = workers() != NULL ? workers()->total_workers() : 1;
3660   for (int i = 0; i < n; ++i) {
3661     task_queue(i)->stats.reset();
3662   }
3663 }
3664 #endif // TASKQUEUE_STATS
3665 
3666 void G1CollectedHeap::log_gc_header() {
3667   if (!G1Log::fine()) {
3668     return;
3669   }
3670   gclog_or_tty->date_stamp(PrintGCDateStamps);
3671   if (PrintGCTimeStamps) {
3672     gclog_or_tty->stamp();
3673     gclog_or_tty->print(": ");
3674   }
3675 
3676   GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
3677     .append(g1_policy()->gcs_are_young() ? " (young)" : " (mixed)")
3678     .append(g1_policy()->during_initial_mark_pause() ? " (initial-mark)" : "");
3679 
3680   gclog_or_tty->print("[%s", (const char*)gc_cause_str);
3681 }
3682 
3683 void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
3684   if (!G1Log::fine()) {
3685     return;
3686   }
3687 
3688   if (G1Log::finer()) {
3689     if (evacuation_failed()) {
3690       gclog_or_tty->print(" (to-space exhausted)");
3691     }
3692     gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
3693     g1_policy()->phase_times()->note_gc_end();
3694     g1_policy()->phase_times()->print(pause_time_sec);
3695     g1_policy()->print_detailed_heap_transition();
3696   } else {
3697     if (evacuation_failed()) {
3698       gclog_or_tty->print("--");
3699     }
3700     g1_policy()->print_heap_transition();
3701     gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
3702   }
3703 }
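
log_gc_header() above assembles its cause string with chained append() calls: GCCauseString's append() returns the object itself so the pieces compose inline, and the (const char*) cast in print("[%s", ...) extracts the underlying buffer. A standalone sketch of that builder shape (simplified; the real GCCauseString takes a GCCause::Cause, not a string):

    #include <cstdio>
    #include <cstring>

    class CauseString {  // stand-in for GCCauseString, illustrative only
      char _buf[128];
    public:
      CauseString(const char* prefix, const char* cause) {
        std::snprintf(_buf, sizeof(_buf), "%s (%s)", prefix, cause);
      }
      CauseString& append(const char* s) {  // returns *this to allow chaining
        std::size_t len = std::strlen(_buf);
        std::snprintf(_buf + len, sizeof(_buf) - len, "%s", s);
        return *this;
      }
      operator const char*() const { return _buf; }
    };

    int main() {
      bool young = true, initial_mark = false;
      CauseString s = CauseString("GC pause", "G1 Evacuation Pause")
        .append(young ? " (young)" : " (mixed)")
        .append(initial_mark ? " (initial-mark)" : "");
      std::printf("[%s", (const char*)s);    // header leaves the bracket open...
      std::printf(", %3.7f secs]\n", 0.01);  // ...and the footer closes it
      return 0;
    }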
3704 
3705 bool
3706 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3707   assert_at_safepoint(true /* should_be_vm_thread */);
3708   guarantee(!is_gc_active(), "collection is not reentrant");
3709 
3710   if (GC_locker::check_active_before_gc()) {
3711     return false;
3712   }
3713 
3714   SvcGCMarker sgcm(SvcGCMarker::MINOR);
3715   ResourceMark rm;
3716 
3717   print_heap_before_gc();
3718 
3719   HRSPhaseSetter x(HRSPhaseEvacuation);
3720   verify_region_sets_optional();
3721   verify_dirty_young_regions();
3722 
3723   // This call will decide whether this pause is an initial-mark
3724   // pause. If it is, during_initial_mark_pause() will return true


3727 
3728   // We do not allow initial-mark to be piggy-backed on a mixed GC.
3729   assert(!g1_policy()->during_initial_mark_pause() ||
3730           g1_policy()->gcs_are_young(), "sanity");
3731 
3732   // We also do not allow mixed GCs during marking.
3733   assert(!mark_in_progress() || g1_policy()->gcs_are_young(), "sanity");
3734 
3735   // Record whether this pause is an initial mark. When the current
3736   // thread has completed its logging output and it's safe to signal
3737   // the CM thread, the flag's value in the policy has been reset.
3738   bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
3739 
3740   // Inner scope for scope based logging, timers, and stats collection
3741   {
3742     if (g1_policy()->during_initial_mark_pause()) {
3743       // We are about to start a marking cycle, so we increment the
3744       // full collection counter.
3745       increment_old_marking_cycles_started();
3746     }
3747     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
3748 
3749     int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
3750                                 workers()->active_workers() : 1);
3751     double pause_start_sec = os::elapsedTime();
3752     g1_policy()->phase_times()->note_gc_start(active_workers);
3753     log_gc_header();
3754 
3755     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3756     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3757 
3758     // If the secondary_free_list is not empty, append it to the
3759     // free_list. No need to wait for the cleanup operation to finish;
3760     // the region allocation code will check the secondary_free_list
3761     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3762     // set, skip this step so that the region allocation code has to
3763     // get entries from the secondary_free_list.
3764     if (!G1StressConcRegionFreeing) {
3765       append_secondary_free_list_if_not_empty_with_lock();
3766     }
3767 
3768     assert(check_young_list_well_formed(),
3769       "young list should be well formed");
3770 
3771     // Don't dynamically change the number of GC threads this early.  A value of
3772     // 0 is used to indicate serial work.  When parallel work is done,
3773     // it will be set.


4030 
4031         // CM reference discovery will be re-enabled if necessary.
4032       }
4033 
4034       // We should do this after we potentially expand the heap so
4035       // that all the COMMIT events are generated before the end GC
4036       // event, and after we retire the GC alloc regions so that all
4037       // RETIRE events are generated before the end GC event.
4038       _hr_printer.end_gc(false /* full */, (size_t) total_collections());
4039 
4040       if (mark_in_progress()) {
4041         concurrent_mark()->update_g1_committed();
4042       }
4043 
4044 #ifdef TRACESPINNING
4045       ParallelTaskTerminator::print_termination_counts();
4046 #endif
4047 
4048       gc_epilogue(false);
4049 
4050       log_gc_footer(os::elapsedTime() - pause_start_sec);
4051     }
4052 
4053     // It is not yet safe to tell the concurrent mark to
4054     // start as we have some optional output below. We don't want the
4055     // output from the concurrent mark thread interfering with this
4056     // logging output either.
4057 
4058     _hrs.verify_optional();
4059     verify_region_sets_optional();
4060 
4061     TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
4062     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
4063 
4064     print_heap_after_gc();
4065 
4066     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
4067     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
4068     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
4069     // before any GC notifications are raised.
4070     g1mm()->update_sizes();
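
The comment above depends on C++ scope/destructor ordering: TraceMemoryManagerStats is an RAII object whose destructor (run when the enclosing scope closes) raises the GC notifications, so update_sizes() must execute before the scope exits. A minimal standalone sketch of that ordering (illustrative names, not HotSpot code):

    #include <cstdio>

    // Stand-in for TraceMemoryManagerStats: the destructor models the
    // GC notification raised at the end of the enclosing scope.
    struct NotifyOnExit {
      ~NotifyOnExit() { std::printf("GC notification raised\n"); }
    };

    static void update_sizes() { std::printf("memory pool sizes updated\n"); }

    int main() {
      {
        NotifyOnExit tms;  // constructed at scope entry
        // ... GC work ...
        update_sizes();    // runs first, while the scope is still open
      }                    // tms destroyed here: notification sees updated pools
      return 0;
    }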

