1068 private:
1069 G1HRPrinter* _hr_printer;
1070 public:
  // Print one region of the post-compaction heap via the G1HRPrinter.
  // After a full compaction no region should still be tagged young,
  // hence the assert. Returning false keeps heap_region_iterate()
  // walking the remaining regions (per the HeapRegionClosure contract).
  bool doHeapRegion(HeapRegion* hr) {
    assert(!hr->is_young(), "not expecting to find young regions");
    _hr_printer->post_compaction(hr);
    return false;
  }
1076
  // Remembers the (non-owned) printer used to emit per-region output;
  // the closure holds no other state.
  PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
    : _hr_printer(hr_printer) { }
1079 };
1080
1081 void G1CollectedHeap::print_hrm_post_compaction() {
1082 if (_hr_printer.is_active()) {
1083 PostCompactionPrinterClosure cl(hr_printer());
1084 heap_region_iterate(&cl);
1085 }
1086 }
1087
1088 G1HeapVerifier::G1VerifyType G1CollectedHeap::young_verification_type() {
1089 if (collector_state()->yc_type() == Mixed) {
1090 return G1HeapVerifier::G1VerifyMixed;
1091 } else {
1092 return G1HeapVerifier::G1VerifyYoung;
1093 }
1094 }
1095
// Abort an in-progress concurrent marking cycle so a full collection
// can proceed safely. (Visible prefix only — the function continues
// beyond this chunk.)
void G1CollectedHeap::abort_concurrent_cycle() {
  // Note: When we have a more flexible GC logging framework that
  // allows us to add optional attributes to a GC log record we
  // could consider timing and reporting how long we wait in the
  // following two methods.
  wait_while_free_regions_coming();
  // If we start the compaction before the CM threads finish
  // scanning the root regions we might trip them over as we'll
  // be moving objects / updating references. So let's wait until
  // they are done. By telling them to abort, they should complete
  // early.
  _cm->root_regions()->abort();
  _cm->root_regions()->wait_until_scan_finished();
  // Flush any regions parked on the secondary free list onto the
  // main free list so they are immediately reusable.
  append_secondary_free_list_if_not_empty_with_lock();

  // Disable discovery and empty the discovered lists
  // for the CM ref processor.
  ref_processor_cm()->disable_discovery();
  ref_processor_cm()->abandon_partial_discovery();
  ref_processor_cm()->verify_no_references_recorded();
2927 // Record whether this pause is an initial mark. When the current
2928 // thread has completed its logging output and it's safe to signal
2929 // the CM thread, the flag's value in the policy has been reset.
2930 bool should_start_conc_mark = collector_state()->during_initial_mark_pause();
2931
2932 // Inner scope for scope based logging, timers, and stats collection
2933 {
2934 EvacuationInfo evacuation_info;
2935
2936 if (collector_state()->during_initial_mark_pause()) {
2937 // We are about to start a marking cycle, so we increment the
2938 // full collection counter.
2939 increment_old_marking_cycles_started();
2940 _cm->gc_tracer_cm()->set_gc_cause(gc_cause());
2941 }
2942
2943 _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
2944
2945 GCTraceCPUTime tcpu;
2946
2947 FormatBuffer<> gc_string("Pause ");
2948 if (collector_state()->during_initial_mark_pause()) {
2949 gc_string.append("Initial Mark");
2950 } else if (collector_state()->gcs_are_young()) {
2951 gc_string.append("Young");
2952 } else {
2953 gc_string.append("Mixed");
2954 }
2955 GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
2956
2957 uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
2958 workers()->active_workers(),
2959 Threads::number_of_non_daemon_threads());
2960 workers()->update_active_workers(active_workers);
2961 log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
2962
2963 TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
2964 TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
2965
2966 // If the secondary_free_list is not empty, append it to the
2967 // free_list. No need to wait for the cleanup operation to finish;
2968 // the region allocation code will check the secondary_free_list
2969 // and wait if necessary. If the G1StressConcRegionFreeing flag is
2970 // set, skip this step so that the region allocation code has to
2971 // get entries from the secondary_free_list.
2972 if (!G1StressConcRegionFreeing) {
2973 append_secondary_free_list_if_not_empty_with_lock();
2974 }
2975
2976 G1HeapTransition heap_transition(this);
2977 size_t heap_used_bytes_before_gc = used();
2978
2979 // Don't dynamically change the number of GC threads this early. A value of
2980 // 0 is used to indicate serial work. When parallel work is done,
2981 // it will be set.
2982
2983 { // Call to jvmpi::post_class_unload_events must occur outside of active GC
2984 IsGCActiveMark x;
2985
2986 gc_prologue(false);
2987
2988 if (VerifyRememberedSets) {
2989 log_info(gc, verify)("[Verifying RemSets before GC]");
2990 VerifyRegionRemSetClosure v_cl;
2991 heap_region_iterate(&v_cl);
2992 }
2993
2994 _verifier->verify_before_gc(young_verification_type());
2995
2996 _verifier->check_bitmaps("GC Start");
2997
2998 #if COMPILER2_OR_JVMCI
2999 DerivedPointerTable::clear();
3000 #endif
3001
3002 // Please see comment in g1CollectedHeap.hpp and
3003 // G1CollectedHeap::ref_processing_init() to see how
3004 // reference processing currently works in G1.
3005
3006 // Enable discovery in the STW reference processor
3007 if (g1_policy()->should_process_references()) {
3008 ref_processor_stw()->enable_discovery();
3009 } else {
3010 ref_processor_stw()->disable_discovery();
3011 }
3012
3013 {
3014 // We want to temporarily turn off discovery by the
3134 // has just got initialized after the previous CSet was freed.
3135 _cm->verify_no_cset_oops();
3136
3137 // This timing is only used by the ergonomics to handle our pause target.
3138 // It is unclear why this should not include the full pause. We will
3139 // investigate this in CR 7178365.
3140 double sample_end_time_sec = os::elapsedTime();
3141 double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
3142 size_t total_cards_scanned = g1_policy()->phase_times()->sum_thread_work_items(G1GCPhaseTimes::ScanRS, G1GCPhaseTimes::ScanRSScannedCards);
3143 g1_policy()->record_collection_pause_end(pause_time_ms, total_cards_scanned, heap_used_bytes_before_gc);
3144
3145 evacuation_info.set_collectionset_used_before(collection_set()->bytes_used_before());
3146 evacuation_info.set_bytes_copied(g1_policy()->bytes_copied_during_gc());
3147
3148 if (VerifyRememberedSets) {
3149 log_info(gc, verify)("[Verifying RemSets after GC]");
3150 VerifyRegionRemSetClosure v_cl;
3151 heap_region_iterate(&v_cl);
3152 }
3153
3154 _verifier->verify_after_gc(young_verification_type());
3155 _verifier->check_bitmaps("GC End");
3156
3157 assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
3158 ref_processor_stw()->verify_no_references_recorded();
3159
3160 // CM reference discovery will be re-enabled if necessary.
3161 }
3162
3163 #ifdef TRACESPINNING
3164 ParallelTaskTerminator::print_termination_counts();
3165 #endif
3166
3167 gc_epilogue(false);
3168 }
3169
3170 // Print the remainder of the GC log output.
3171 if (evacuation_failed()) {
3172 log_info(gc)("To-space exhausted");
3173 }
3174
|
1068 private:
1069 G1HRPrinter* _hr_printer;
1070 public:
  // Print one region of the post-compaction heap via the G1HRPrinter.
  // After a full compaction no region should still be tagged young,
  // hence the assert. Returning false keeps heap_region_iterate()
  // walking the remaining regions (per the HeapRegionClosure contract).
  bool doHeapRegion(HeapRegion* hr) {
    assert(!hr->is_young(), "not expecting to find young regions");
    _hr_printer->post_compaction(hr);
    return false;
  }
1076
  // Remembers the (non-owned) printer used to emit per-region output;
  // the closure holds no other state.
  PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
    : _hr_printer(hr_printer) { }
1079 };
1080
1081 void G1CollectedHeap::print_hrm_post_compaction() {
1082 if (_hr_printer.is_active()) {
1083 PostCompactionPrinterClosure cl(hr_printer());
1084 heap_region_iterate(&cl);
1085 }
1086 }
1087
// Abort an in-progress concurrent marking cycle so a full collection
// can proceed safely. (Visible prefix only — the function continues
// beyond this chunk.)
void G1CollectedHeap::abort_concurrent_cycle() {
  // Note: When we have a more flexible GC logging framework that
  // allows us to add optional attributes to a GC log record we
  // could consider timing and reporting how long we wait in the
  // following two methods.
  wait_while_free_regions_coming();
  // If we start the compaction before the CM threads finish
  // scanning the root regions we might trip them over as we'll
  // be moving objects / updating references. So let's wait until
  // they are done. By telling them to abort, they should complete
  // early.
  _cm->root_regions()->abort();
  _cm->root_regions()->wait_until_scan_finished();
  // Flush any regions parked on the secondary free list onto the
  // main free list so they are immediately reusable.
  append_secondary_free_list_if_not_empty_with_lock();

  // Disable discovery and empty the discovered lists
  // for the CM ref processor.
  ref_processor_cm()->disable_discovery();
  ref_processor_cm()->abandon_partial_discovery();
  ref_processor_cm()->verify_no_references_recorded();
2919 // Record whether this pause is an initial mark. When the current
2920 // thread has completed its logging output and it's safe to signal
2921 // the CM thread, the flag's value in the policy has been reset.
2922 bool should_start_conc_mark = collector_state()->during_initial_mark_pause();
2923
2924 // Inner scope for scope based logging, timers, and stats collection
2925 {
2926 EvacuationInfo evacuation_info;
2927
2928 if (collector_state()->during_initial_mark_pause()) {
2929 // We are about to start a marking cycle, so we increment the
2930 // full collection counter.
2931 increment_old_marking_cycles_started();
2932 _cm->gc_tracer_cm()->set_gc_cause(gc_cause());
2933 }
2934
2935 _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
2936
2937 GCTraceCPUTime tcpu;
2938
2939 G1HeapVerifier::G1VerifyType verify_type;
2940 FormatBuffer<> gc_string("Pause ");
2941 if (collector_state()->during_initial_mark_pause()) {
2942 gc_string.append("Initial Mark");
2943 verify_type = G1HeapVerifier::G1VerifyInitialMark;
2944 } else if (collector_state()->gcs_are_young()) {
2945 gc_string.append("Young");
2946 verify_type = G1HeapVerifier::G1VerifyYoungOnly;
2947 } else {
2948 gc_string.append("Mixed");
2949 verify_type = G1HeapVerifier::G1VerifyMixed;
2950 }
2951 GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
2952
2953 uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
2954 workers()->active_workers(),
2955 Threads::number_of_non_daemon_threads());
2956 workers()->update_active_workers(active_workers);
2957 log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
2958
2959 TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
2960 TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
2961
2962 // If the secondary_free_list is not empty, append it to the
2963 // free_list. No need to wait for the cleanup operation to finish;
2964 // the region allocation code will check the secondary_free_list
2965 // and wait if necessary. If the G1StressConcRegionFreeing flag is
2966 // set, skip this step so that the region allocation code has to
2967 // get entries from the secondary_free_list.
2968 if (!G1StressConcRegionFreeing) {
2969 append_secondary_free_list_if_not_empty_with_lock();
2970 }
2971
2972 G1HeapTransition heap_transition(this);
2973 size_t heap_used_bytes_before_gc = used();
2974
2975 // Don't dynamically change the number of GC threads this early. A value of
2976 // 0 is used to indicate serial work. When parallel work is done,
2977 // it will be set.
2978
2979 { // Call to jvmpi::post_class_unload_events must occur outside of active GC
2980 IsGCActiveMark x;
2981
2982 gc_prologue(false);
2983
2984 if (VerifyRememberedSets) {
2985 log_info(gc, verify)("[Verifying RemSets before GC]");
2986 VerifyRegionRemSetClosure v_cl;
2987 heap_region_iterate(&v_cl);
2988 }
2989
2990 _verifier->verify_before_gc(verify_type);
2991
2992 _verifier->check_bitmaps("GC Start");
2993
2994 #if COMPILER2_OR_JVMCI
2995 DerivedPointerTable::clear();
2996 #endif
2997
2998 // Please see comment in g1CollectedHeap.hpp and
2999 // G1CollectedHeap::ref_processing_init() to see how
3000 // reference processing currently works in G1.
3001
3002 // Enable discovery in the STW reference processor
3003 if (g1_policy()->should_process_references()) {
3004 ref_processor_stw()->enable_discovery();
3005 } else {
3006 ref_processor_stw()->disable_discovery();
3007 }
3008
3009 {
3010 // We want to temporarily turn off discovery by the
3130 // has just got initialized after the previous CSet was freed.
3131 _cm->verify_no_cset_oops();
3132
3133 // This timing is only used by the ergonomics to handle our pause target.
3134 // It is unclear why this should not include the full pause. We will
3135 // investigate this in CR 7178365.
3136 double sample_end_time_sec = os::elapsedTime();
3137 double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
3138 size_t total_cards_scanned = g1_policy()->phase_times()->sum_thread_work_items(G1GCPhaseTimes::ScanRS, G1GCPhaseTimes::ScanRSScannedCards);
3139 g1_policy()->record_collection_pause_end(pause_time_ms, total_cards_scanned, heap_used_bytes_before_gc);
3140
3141 evacuation_info.set_collectionset_used_before(collection_set()->bytes_used_before());
3142 evacuation_info.set_bytes_copied(g1_policy()->bytes_copied_during_gc());
3143
3144 if (VerifyRememberedSets) {
3145 log_info(gc, verify)("[Verifying RemSets after GC]");
3146 VerifyRegionRemSetClosure v_cl;
3147 heap_region_iterate(&v_cl);
3148 }
3149
3150 _verifier->verify_after_gc(verify_type);
3151 _verifier->check_bitmaps("GC End");
3152
3153 assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
3154 ref_processor_stw()->verify_no_references_recorded();
3155
3156 // CM reference discovery will be re-enabled if necessary.
3157 }
3158
3159 #ifdef TRACESPINNING
3160 ParallelTaskTerminator::print_termination_counts();
3161 #endif
3162
3163 gc_epilogue(false);
3164 }
3165
3166 // Print the remainder of the GC log output.
3167 if (evacuation_failed()) {
3168 log_info(gc)("To-space exhausted");
3169 }
3170
|