3889 3890 // We should do this after we potentially expand the heap so 3891 // that all the COMMIT events are generated before the end GC 3892 // event, and after we retire the GC alloc regions so that all 3893 // RETIRE events are generated before the end GC event. 3894 _hr_printer.end_gc(false /* full */, (size_t) total_collections()); 3895 3896 // We have to do this after we decide whether to expand the heap or not. 3897 g1_policy()->print_heap_transition(); 3898 3899 if (mark_in_progress()) { 3900 concurrent_mark()->update_g1_committed(); 3901 } 3902 3903 #ifdef TRACESPINNING 3904 ParallelTaskTerminator::print_termination_counts(); 3905 #endif 3906 3907 gc_epilogue(false); 3908 } 3909 3910 if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) { 3911 gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum); 3912 print_tracing_info(); 3913 vm_exit(-1); 3914 } 3915 } 3916 3917 // The closing of the inner scope, immediately above, will complete 3918 // logging at the "fine" level. The record_collection_pause_end() call 3919 // above will complete logging at the "finer" level. 3920 // 3921 // It is not yet safe, however, to tell the concurrent mark to 3922 // start as we have some optional output below. We don't want the 3923 // output from the concurrent mark thread interfering with this 3924 // logging output either. 3925 3926 _hrs.verify_optional(); 3927 verify_region_sets_optional(); 3928 3929 TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats()); 3930 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats()); 3931 3932 print_heap_after_gc(); 3933 g1mm()->update_sizes(); 3934 | 3889 3890 // We should do this after we potentially expand the heap so 3891 // that all the COMMIT events are generated before the end GC 3892 // event, and after we retire the GC alloc regions so that all 3893 // RETIRE events are generated before the end GC event. 
3894 _hr_printer.end_gc(false /* full */, (size_t) total_collections()); 3895 3896 // We have to do this after we decide whether to expand the heap or not. 3897 g1_policy()->print_heap_transition(); 3898 3899 if (mark_in_progress()) { 3900 concurrent_mark()->update_g1_committed(); 3901 } 3902 3903 #ifdef TRACESPINNING 3904 ParallelTaskTerminator::print_termination_counts(); 3905 #endif 3906 3907 gc_epilogue(false); 3908 } 3909 } 3910 3911 // The closing of the inner scope, immediately above, will complete 3912 // logging at the "fine" level. The record_collection_pause_end() call 3913 // above will complete logging at the "finer" level. 3914 // 3915 // It is not yet safe, however, to tell the concurrent mark to 3916 // start as we have some optional output below. We don't want the 3917 // output from the concurrent mark thread interfering with this 3918 // logging output either. 3919 3920 _hrs.verify_optional(); 3921 verify_region_sets_optional(); 3922 3923 TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats()); 3924 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats()); 3925 3926 print_heap_after_gc(); 3927 g1mm()->update_sizes(); 3928 |