< prev index next >

src/hotspot/share/gc/g1/g1DefaultPolicy.cpp

Print this page
rev 47446 : [mq]: 8184667-cleanup-g1concurrentmark
rev 47449 : imported patch 8189666
rev 47451 : imported patch 8189729-perc-naming
rev 47452 : [mq]: 8189729-erikd-review


 987       // periodically yields while clearing the next marking bitmap
 988       // and, if it's in a yield point, it's waiting for us to
 989       // finish. So, at this point we will not start a cycle and we'll
 990       // let the concurrent marking thread complete the last one.
 991       log_debug(gc, ergo)("Do not initiate concurrent cycle (concurrent cycle already in progress)");
 992     }
 993   }
 994 }
 995 
 996 void G1DefaultPolicy::record_concurrent_mark_cleanup_end() {
 997   cset_chooser()->rebuild(_g1->workers(), _g1->num_regions());
 998 
 999   double end_sec = os::elapsedTime();
1000   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
1001   _analytics->report_concurrent_mark_cleanup_times_ms(elapsed_time_ms);
1002   _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);
1003 
1004   record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
1005 }
1006 
// Returns the given amount of reclaimable bytes as a percentage of the
// current heap capacity (used e.g. to compare against G1HeapWastePercent).
double G1DefaultPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) const {
  return percent_of(reclaimable_bytes, _g1->capacity());
}
1010 
1011 void G1DefaultPolicy::maybe_start_marking() {
1012   if (need_to_start_conc_mark("end of GC")) {
1013     // Note: this might have already been set, if during the last
1014     // pause we decided to start a cycle but at the beginning of
1015     // this pause we decided to postpone it. That's OK.
1016     collector_state()->set_initiate_conc_mark_if_possible(true);
1017   }
1018 }
1019 
1020 G1DefaultPolicy::PauseKind G1DefaultPolicy::young_gc_pause_kind() const {
1021   assert(!collector_state()->full_collection(), "must be");
1022   if (collector_state()->during_initial_mark_pause()) {
1023     assert(collector_state()->last_gc_was_young(), "must be");
1024     assert(!collector_state()->last_young_gc(), "must be");
1025     return InitialMarkGC;
1026   } else if (collector_state()->last_young_gc()) {
1027     assert(!collector_state()->during_initial_mark_pause(), "must be");


1062       _initial_mark_to_mixed.record_mixed_gc_start(start);
1063       break;
1064     default:
1065       ShouldNotReachHere();
1066   }
1067 }
1068 
// Abandons the in-progress measurement of the initial-mark-to-first-mixed-GC
// interval by resetting the tracker.
void G1DefaultPolicy::abort_time_to_mixed_tracking() {
  _initial_mark_to_mixed.reset();
}
1072 
1073 bool G1DefaultPolicy::next_gc_should_be_mixed(const char* true_action_str,
1074                                        const char* false_action_str) const {
1075   if (cset_chooser()->is_empty()) {
1076     log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str);
1077     return false;
1078   }
1079 
1080   // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
1081   size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes();
1082   double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
1083   double threshold = (double) G1HeapWastePercent;
1084   if (reclaimable_perc <= threshold) {
1085     log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
1086                         false_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
1087     return false;
1088   }
1089   log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
1090                       true_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
1091   return true;
1092 }
1093 
1094 uint G1DefaultPolicy::calc_min_old_cset_length() const {
1095   // The min old CSet region bound is based on the maximum desired
1096   // number of mixed GCs after a cycle. I.e., even if some old regions
1097   // look expensive, we should add them to the CSet anyway to make
1098   // sure we go through the available old regions in no more than the
1099   // maximum desired number of mixed GCs.
1100   //
1101   // The calculation is based on the number of marked regions we added
1102   // to the CSet chooser in the first place, not how many remain, so
1103   // that the result is the same during all mixed GCs that follow a cycle.
1104 
1105   const size_t region_num = (size_t) cset_chooser()->length();
1106   const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
1107   size_t result = region_num / gc_num;
1108   // emulate ceiling
1109   if (result * gc_num < region_num) {
1110     result += 1;




 987       // periodically yields while clearing the next marking bitmap
 988       // and, if it's in a yield point, it's waiting for us to
 989       // finish. So, at this point we will not start a cycle and we'll
 990       // let the concurrent marking thread complete the last one.
 991       log_debug(gc, ergo)("Do not initiate concurrent cycle (concurrent cycle already in progress)");
 992     }
 993   }
 994 }
 995 
 996 void G1DefaultPolicy::record_concurrent_mark_cleanup_end() {
 997   cset_chooser()->rebuild(_g1->workers(), _g1->num_regions());
 998 
 999   double end_sec = os::elapsedTime();
1000   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
1001   _analytics->report_concurrent_mark_cleanup_times_ms(elapsed_time_ms);
1002   _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);
1003 
1004   record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
1005 }
1006 
// Returns the given amount of reclaimable bytes as a percentage of the
// current heap capacity (used e.g. to compare against G1HeapWastePercent).
double G1DefaultPolicy::reclaimable_bytes_percent(size_t reclaimable_bytes) const {
  return percent_of(reclaimable_bytes, _g1->capacity());
}
1010 
1011 void G1DefaultPolicy::maybe_start_marking() {
1012   if (need_to_start_conc_mark("end of GC")) {
1013     // Note: this might have already been set, if during the last
1014     // pause we decided to start a cycle but at the beginning of
1015     // this pause we decided to postpone it. That's OK.
1016     collector_state()->set_initiate_conc_mark_if_possible(true);
1017   }
1018 }
1019 
1020 G1DefaultPolicy::PauseKind G1DefaultPolicy::young_gc_pause_kind() const {
1021   assert(!collector_state()->full_collection(), "must be");
1022   if (collector_state()->during_initial_mark_pause()) {
1023     assert(collector_state()->last_gc_was_young(), "must be");
1024     assert(!collector_state()->last_young_gc(), "must be");
1025     return InitialMarkGC;
1026   } else if (collector_state()->last_young_gc()) {
1027     assert(!collector_state()->during_initial_mark_pause(), "must be");


1062       _initial_mark_to_mixed.record_mixed_gc_start(start);
1063       break;
1064     default:
1065       ShouldNotReachHere();
1066   }
1067 }
1068 
// Abandons the in-progress measurement of the initial-mark-to-first-mixed-GC
// interval by resetting the tracker.
void G1DefaultPolicy::abort_time_to_mixed_tracking() {
  _initial_mark_to_mixed.reset();
}
1072 
1073 bool G1DefaultPolicy::next_gc_should_be_mixed(const char* true_action_str,
1074                                        const char* false_action_str) const {
1075   if (cset_chooser()->is_empty()) {
1076     log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str);
1077     return false;
1078   }
1079 
1080   // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
1081   size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes();
1082   double reclaimable_percent = reclaimable_bytes_percent(reclaimable_bytes);
1083   double threshold = (double) G1HeapWastePercent;
1084   if (reclaimable_percent <= threshold) {
1085     log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
1086                         false_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
1087     return false;
1088   }
1089   log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
1090                       true_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
1091   return true;
1092 }
1093 
1094 uint G1DefaultPolicy::calc_min_old_cset_length() const {
1095   // The min old CSet region bound is based on the maximum desired
1096   // number of mixed GCs after a cycle. I.e., even if some old regions
1097   // look expensive, we should add them to the CSet anyway to make
1098   // sure we go through the available old regions in no more than the
1099   // maximum desired number of mixed GCs.
1100   //
1101   // The calculation is based on the number of marked regions we added
1102   // to the CSet chooser in the first place, not how many remain, so
1103   // that the result is the same during all mixed GCs that follow a cycle.
1104 
1105   const size_t region_num = (size_t) cset_chooser()->length();
1106   const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
1107   size_t result = region_num / gc_num;
1108   // emulate ceiling
1109   if (result * gc_num < region_num) {
1110     result += 1;


< prev index next >