src/share/vm/gc/g1/g1CollectorPolicy.cpp
rev 10472 : 8151711: Move G1 number sequences out of the G1 collector policy
Reviewed-by:
rev 10473 : [mq]: rename-to-analytics
rev 10474 : [mq]: fixindent
rev 10475 : 8151637: Move CollectionSetChooser rebuild code into CollectionSetChooser
Reviewed-by:
rev 10476 : 8151808: Factor G1 heap sizing code out of the G1CollectorPolicy
Reviewed-by:


  32 #include "gc/g1/g1ConcurrentMark.hpp"
  33 #include "gc/g1/g1IHOPControl.hpp"
  34 #include "gc/g1/g1GCPhaseTimes.hpp"
  35 #include "gc/g1/g1YoungGenSizer.hpp"
  36 #include "gc/g1/heapRegion.inline.hpp"
  37 #include "gc/g1/heapRegionRemSet.hpp"
  38 #include "gc/shared/gcPolicyCounters.hpp"
  39 #include "runtime/arguments.hpp"
  40 #include "runtime/java.hpp"
  41 #include "runtime/mutexLocker.hpp"
  42 #include "utilities/debug.hpp"
  43 #include "utilities/pair.hpp"
  44 
  45 G1CollectorPolicy::G1CollectorPolicy() :
  46   _predictor(G1ConfidencePercent / 100.0),
  47   _analytics(new G1Analytics(&_predictor)),
  48   _pause_time_target_ms((double) MaxGCPauseMillis),
  49   _rs_lengths_prediction(0),
  50   _max_survivor_regions(0),
  51   _survivors_age_table(true),
  52   _gc_overhead_perc(0.0),
  53 
  54   _bytes_allocated_in_old_since_last_gc(0),
  55   _ihop_control(NULL),
  56   _initial_mark_to_mixed() {
  57 
  58   // SurvRateGroups below must be initialized after the predictor because they
  59   // indirectly use it through this object passed to their constructor.
  60   _short_lived_surv_rate_group =
  61     new SurvRateGroup(&_predictor, "Short Lived", G1YoungSurvRateNumRegionsSummary);
  62   _survivor_surv_rate_group =
  63     new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary);
  64 
  65   // Set up the region size and associated fields. Given that the
  66   // policy is created before the heap, we have to set this up here,
  67   // so it's done as soon as possible.
  68 
  69   // It would have been natural to pass initial_heap_byte_size() and
  70   // max_heap_byte_size() to setup_heap_region_size() but those have
  71   // not been set up at this point since they should be aligned with
  72   // the region size. So, there is a circular dependency here. We base
  73   // the region size on the heap size, but the heap size should be
  74   // aligned with the region size. To get around this we use the
  75   // unaligned values for the heap.
  76   HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
  77   HeapRegionRemSet::setup_remset_size();
  78 
  79   clear_ratio_check_data();
  80 
  81   _phase_times = new G1GCPhaseTimes(ParallelGCThreads);
  82 
  83   // Below, we might need to calculate the pause time target based on
  84   // the pause interval. When we do so we are going to give G1 maximum
  85   // flexibility and allow it to do pauses when it needs to. So, we'll
  86   // arrange for the pause interval to be pause time target + 1 to
  87   // ensure that a) the pause time target is maximized with respect to
  88   // the pause interval and b) we maintain the invariant that pause
  89   // time target < pause interval. If the user does not want this
  90   // maximum flexibility, they will have to set the pause interval
  91   // explicitly.
  92 
  93   // First make sure that, if either parameter is set, its value is
  94   // reasonable.
  95   if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
  96     if (MaxGCPauseMillis < 1) {
  97       vm_exit_during_initialization("MaxGCPauseMillis should be "
  98                                     "greater than 0");
  99     }
 100   }


 125   if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
 126     FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
 127   }
 128 
 129   // Finally, make sure that the two parameters are consistent.
 130   if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
 131     char buffer[256];
 132     jio_snprintf(buffer, 256,
 133                  "MaxGCPauseMillis (%u) should be less than "
 134                  "GCPauseIntervalMillis (%u)",
 135                  MaxGCPauseMillis, GCPauseIntervalMillis);
 136     vm_exit_during_initialization(buffer);
 137   }
 138 
 139   double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
 140   double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
 141   _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
 142 
 143   _tenuring_threshold = MaxTenuringThreshold;
 144 
 145   assert(GCTimeRatio > 0,
 146          "we should have set it to a default value in set_g1_gc_flags() "
 147          "if a user set it to 0");
 148   _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
 149 
 150   uintx reserve_perc = G1ReservePercent;
 151   // Put an artificial ceiling on this so that it's not set to a silly value.
 152   if (reserve_perc > 50) {
 153     reserve_perc = 50;
 154     warning("G1ReservePercent is set to a value that is too large, "
 155             "it's been updated to " UINTX_FORMAT, reserve_perc);
 156   }
 157   _reserve_factor = (double) reserve_perc / 100.0;
 158   // This will be set when the heap is expanded
 159   // for the first time during initialization.
 160   _reserve_regions = 0;
 161 
 162   _ihop_control = create_ihop_control();
 163 }
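
The constructor above derives the permitted GC overhead from GCTimeRatio (an application-to-GC time ratio of N:1 becomes 100/(1+N) percent) and, when the user sets only MaxGCPauseMillis, defaults GCPauseIntervalMillis to the pause target plus one so that the invariant "pause time target < pause interval" holds with maximum slack. Below is a minimal standalone sketch of that arithmetic; the flag values are assumed example defaults, not taken from this file.

    // Standalone sketch of the overhead/interval arithmetic used above.
    // The flag values are assumed examples only.
    #include <cstdio>

    int main() {
      const unsigned GCTimeRatio     = 12;   // assumed example value
      unsigned MaxGCPauseMillis      = 200;  // assumed example value
      unsigned GCPauseIntervalMillis = 0;    // 0 == "left at its default"

      // Share of total time the policy allows for GC pauses.
      double gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));

      // Default the interval to pause target + 1 so that
      // "pause time target < pause interval" holds with maximum slack.
      if (GCPauseIntervalMillis == 0) {
        GCPauseIntervalMillis = MaxGCPauseMillis + 1;
      }

      double max_gc_time = MaxGCPauseMillis / 1000.0;      // seconds per pause
      double time_slice  = GCPauseIntervalMillis / 1000.0; // seconds per MMU window

      printf("overhead %.2f%%, pause %.3fs within each %.3fs window\n",
             gc_overhead_perc, max_gc_time, time_slice);
      return 0;
    }

With these example values the policy would target roughly 7.7% of total time in GC pauses and a 200 ms pause within every 201 ms window.
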
 164 
 165 G1CollectorPolicy::~G1CollectorPolicy() {
 166   delete _ihop_control;
 167 }
 168 


1066   size_t rs_length = hr->rem_set()->occupied();
1067   // Predicting the number of cards is based on which type of GC
1068   // we're predicting for.
1069   size_t card_num = _analytics->predict_card_num(rs_length, for_young_gc);
1070   size_t bytes_to_copy = predict_bytes_to_copy(hr);
1071 
1072   double region_elapsed_time_ms =
1073     _analytics->predict_rs_scan_time_ms(card_num, collector_state()->gcs_are_young()) +
1074     _analytics->predict_object_copy_time_ms(bytes_to_copy, collector_state()->during_concurrent_mark());
1075 
1076   // The prediction of the "other" time for this region is based
1077   // upon the region type and NOT the GC type.
1078   if (hr->is_young()) {
1079     region_elapsed_time_ms += _analytics->predict_young_other_time_ms(1);
1080   } else {
1081     region_elapsed_time_ms += _analytics->predict_non_young_other_time_ms(1);
1082   }
1083   return region_elapsed_time_ms;
1084 }
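
The prediction above is the sum of three independent terms: remembered-set scan time driven by the predicted card count, copy time driven by the predicted live bytes, and a fixed per-region "other" term chosen by region type. A hedged sketch of that composition follows, with made-up per-unit costs standing in for the decaying averages kept by G1Analytics.

    #include <cstddef>
    #include <cstdio>

    // Illustrative per-unit costs; the real policy reads these from
    // G1Analytics' running averages rather than from constants.
    const double kMsPerCard       = 0.0002;
    const double kMsPerByteCopied = 0.000001;
    const double kYoungOtherMs    = 0.05;
    const double kNonYoungOtherMs = 0.10;

    double predict_region_time_ms(size_t card_num, size_t bytes_to_copy, bool is_young) {
      double ms = card_num * kMsPerCard             // remembered-set scan
                + bytes_to_copy * kMsPerByteCopied; // object copy
      ms += is_young ? kYoungOtherMs : kNonYoungOtherMs; // per-region "other" time
      return ms;
    }

    int main() {
      // e.g. 5000 cards and 512 KB to copy in a young region.
      printf("%.3f ms\n", predict_region_time_ms(5000, 512 * 1024, true));
      return 0;
    }
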
1085 
1086 void G1CollectorPolicy::clear_ratio_check_data() {
1087   _ratio_over_threshold_count = 0;
1088   _ratio_over_threshold_sum = 0.0;
1089   _pauses_since_start = 0;
1090 }
1091 
1092 size_t G1CollectorPolicy::expansion_amount() {
1093   double recent_gc_overhead = _analytics->recent_avg_pause_time_ratio() * 100.0;
1094   double last_gc_overhead = _analytics->last_pause_time_ratio() * 100.0;
1095   double threshold = _gc_overhead_perc;
1096   size_t expand_bytes = 0;
1097 
1098   // If the heap is at less than half its maximum size, scale the threshold down,
1099   // to a limit of 1. Thus the smaller the heap is, the more likely it is to expand,
1100   // though the scaling code will likely keep the increase small.
1101   if (_g1->capacity() <= _g1->max_capacity() / 2) {
1102     threshold *= (double)_g1->capacity() / (double)(_g1->max_capacity() / 2);
1103     threshold = MAX2(threshold, 1.0);
1104   }
1105 
1106   // If the last GC time ratio is over the threshold, increment the count of
1107   // times it has been exceeded, and add this ratio to the sum of exceeded
1108   // ratios.
1109   if (last_gc_overhead > threshold) {
1110     _ratio_over_threshold_count++;
1111     _ratio_over_threshold_sum += last_gc_overhead;
1112   }
1113 
1114   // Check if we've had enough GC time ratio checks that were over the
1115   // threshold to trigger an expansion. We'll also expand if we've
1116   // reached the end of the history buffer and the average of all entries
1117   // is still over the threshold. This indicates a smaller number of GCs were
1118   // long enough to make the average exceed the threshold.
1119   bool filled_history_buffer = _pauses_since_start == NumPrevPausesForHeuristics;
1120   if ((_ratio_over_threshold_count == MinOverThresholdForGrowth) ||
1121       (filled_history_buffer && (recent_gc_overhead > threshold))) {
1122     size_t min_expand_bytes = HeapRegion::GrainBytes;
1123     size_t reserved_bytes = _g1->max_capacity();
1124     size_t committed_bytes = _g1->capacity();
1125     size_t uncommitted_bytes = reserved_bytes - committed_bytes;
1126     size_t expand_bytes_via_pct =
1127       uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
1128     double scale_factor = 1.0;
1129 
1130     // If the current size is less than 1/4 of the Initial heap size, expand
1131     // by half of the delta between the current and Initial sizes. I.e., grow
1132     // back quickly.
1133     //
1134     // Otherwise, take the current size, or G1ExpandByPercentOfAvailable % of
1135     // the available expansion space, whichever is smaller, as the base
1136     // expansion size. Then possibly scale this size according to how much the
1137     // threshold has (on average) been exceeded by. If the delta is small
1138     // (less than the StartScaleDownAt value), scale the size down linearly, but
1139     // not by less than MinScaleDownFactor. If the delta is large (greater than
1140     // the StartScaleUpAt value), scale up, but adding no more than MaxScaleUpFactor
1141     // times the base size. The scaling will be linear in the range from
1142     // StartScaleUpAt to (StartScaleUpAt + ScaleUpRange). In other words,
1143     // ScaleUpRange sets the rate of scaling up.
1144     if (committed_bytes < InitialHeapSize / 4) {
1145       expand_bytes = (InitialHeapSize - committed_bytes) / 2;
1146     } else {
1147       double const MinScaleDownFactor = 0.2;
1148       double const MaxScaleUpFactor = 2;
1149       double const StartScaleDownAt = _gc_overhead_perc;
1150       double const StartScaleUpAt = _gc_overhead_perc * 1.5;
1151       double const ScaleUpRange = _gc_overhead_perc * 2.0;
1152 
1153       double ratio_delta;
1154       if (filled_history_buffer) {
1155         ratio_delta = recent_gc_overhead - threshold;
1156       } else {
1157         ratio_delta = (_ratio_over_threshold_sum/_ratio_over_threshold_count) - threshold;
1158       }
1159 
1160       expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
1161       if (ratio_delta < StartScaleDownAt) {
1162         scale_factor = ratio_delta / StartScaleDownAt;
1163         scale_factor = MAX2(scale_factor, MinScaleDownFactor);
1164       } else if (ratio_delta > StartScaleUpAt) {
1165         scale_factor = 1 + ((ratio_delta - StartScaleUpAt) / ScaleUpRange);
1166         scale_factor = MIN2(scale_factor, MaxScaleUpFactor);
1167       }
1168     }
1169 
1170     log_debug(gc, ergo, heap)("Attempt heap expansion (recent GC overhead higher than threshold after GC) "
1171                               "recent GC overhead: %1.2f %% threshold: %1.2f %% uncommitted: " SIZE_FORMAT "B base expansion amount and scale: " SIZE_FORMAT "B (%1.2f%%)",
1172                               recent_gc_overhead, threshold, uncommitted_bytes, expand_bytes, scale_factor * 100);
1173 
1174     expand_bytes = static_cast<size_t>(expand_bytes * scale_factor);
1175 
1176     // Ensure the expansion size is at least the minimum growth amount
1177     // and at most the remaining uncommitted byte size.
1178     expand_bytes = MAX2(expand_bytes, min_expand_bytes);
1179     expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
1180 
1181     clear_ratio_check_data();
1182   } else {
1183     // An expansion was not triggered. If we've started counting, increment
1184     // the number of checks we've made in the current window.  If we've
1185     // reached the end of the window without resizing, clear the counters to
1186     // start again the next time we see a ratio above the threshold.
1187     if (_ratio_over_threshold_count > 0) {
1188       _pauses_since_start++;
1189       if (_pauses_since_start > NumPrevPausesForHeuristics) {
1190         clear_ratio_check_data();
1191       }
1192     }
1193   }
1194 
1195   return expand_bytes;
1196 }
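
The core of the heuristic above is the mapping from how far the GC time ratio exceeds the threshold to a multiplier on the base expansion size: deltas below the overhead percentage shrink the request (never below 0.2x), deltas more than 1.5x above it grow the request linearly (never above 2x). A standalone sketch of just that mapping follows; the 8.0 passed in main is an assumed example overhead percentage.

    #include <algorithm>
    #include <cstdio>
    #include <initializer_list>

    // Maps the amount by which the GC time ratio exceeds the threshold to a
    // multiplier on the base expansion size, mirroring the constants above.
    double expansion_scale_factor(double ratio_delta, double gc_overhead_perc) {
      const double MinScaleDownFactor = 0.2;
      const double MaxScaleUpFactor   = 2.0;
      const double StartScaleDownAt   = gc_overhead_perc;        // below this: shrink the request
      const double StartScaleUpAt     = gc_overhead_perc * 1.5;  // above this: grow the request
      const double ScaleUpRange       = gc_overhead_perc * 2.0;  // controls how fast it grows

      double scale = 1.0;
      if (ratio_delta < StartScaleDownAt) {
        scale = std::max(ratio_delta / StartScaleDownAt, MinScaleDownFactor);
      } else if (ratio_delta > StartScaleUpAt) {
        scale = std::min(1.0 + (ratio_delta - StartScaleUpAt) / ScaleUpRange, MaxScaleUpFactor);
      }
      return scale;
    }

    int main() {
      for (double delta : {1.0, 8.0, 15.0, 40.0}) {
        printf("delta %5.1f -> scale %.2f\n", delta, expansion_scale_factor(delta, 8.0));
      }
      return 0;
    }

With an assumed 8% overhead threshold, a delta of 1 clamps to 0.2x, a delta between 8 and 12 leaves the request unscaled, and a delta of 40 clamps to the 2x ceiling.
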
1197 
1198 void G1CollectorPolicy::print_yg_surv_rate_info() const {
1199 #ifndef PRODUCT
1200   _short_lived_surv_rate_group->print_surv_rate_summary();
1201   // add this call for any other surv rate groups
1202 #endif // PRODUCT
1203 }
1204 
1205 bool G1CollectorPolicy::is_young_list_full() const {
1206   uint young_list_length = _g1->young_list()->length();
1207   uint young_list_target_length = _young_list_target_length;
1208   return young_list_length >= young_list_target_length;
1209 }
1210 
1211 bool G1CollectorPolicy::can_expand_young_list() const {
1212   uint young_list_length = _g1->young_list()->length();
1213   uint young_list_max_length = _young_list_max_length;
1214   return young_list_length < young_list_max_length;
1215 }
1216 




  32 #include "gc/g1/g1ConcurrentMark.hpp"
  33 #include "gc/g1/g1IHOPControl.hpp"
  34 #include "gc/g1/g1GCPhaseTimes.hpp"
  35 #include "gc/g1/g1YoungGenSizer.hpp"
  36 #include "gc/g1/heapRegion.inline.hpp"
  37 #include "gc/g1/heapRegionRemSet.hpp"
  38 #include "gc/shared/gcPolicyCounters.hpp"
  39 #include "runtime/arguments.hpp"
  40 #include "runtime/java.hpp"
  41 #include "runtime/mutexLocker.hpp"
  42 #include "utilities/debug.hpp"
  43 #include "utilities/pair.hpp"
  44 
  45 G1CollectorPolicy::G1CollectorPolicy() :
  46   _predictor(G1ConfidencePercent / 100.0),
  47   _analytics(new G1Analytics(&_predictor)),
  48   _pause_time_target_ms((double) MaxGCPauseMillis),
  49   _rs_lengths_prediction(0),
  50   _max_survivor_regions(0),
  51   _survivors_age_table(true),

  52 
  53   _bytes_allocated_in_old_since_last_gc(0),
  54   _ihop_control(NULL),
  55   _initial_mark_to_mixed() {
  56 
  57   // SurvRateGroups below must be initialized after the predictor because they
  58   // indirectly use it through this object passed to their constructor.
  59   _short_lived_surv_rate_group =
  60     new SurvRateGroup(&_predictor, "Short Lived", G1YoungSurvRateNumRegionsSummary);
  61   _survivor_surv_rate_group =
  62     new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary);
  63 
  64   // Set up the region size and associated fields. Given that the
  65   // policy is created before the heap, we have to set this up here,
  66   // so it's done as soon as possible.
  67 
  68   // It would have been natural to pass initial_heap_byte_size() and
  69   // max_heap_byte_size() to setup_heap_region_size() but those have
  70   // not been set up at this point since they should be aligned with
  71   // the region size. So, there is a circular dependency here. We base
  72   // the region size on the heap size, but the heap size should be
  73   // aligned with the region size. To get around this we use the
  74   // unaligned values for the heap.
  75   HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
  76   HeapRegionRemSet::setup_remset_size();
  77 


  78   _phase_times = new G1GCPhaseTimes(ParallelGCThreads);
  79 
  80   // Below, we might need to calculate the pause time target based on
  81   // the pause interval. When we do so we are going to give G1 maximum
  82   // flexibility and allow it to do pauses when it needs to. So, we'll
  83   // arrange for the pause interval to be pause time target + 1 to
  84   // ensure that a) the pause time target is maximized with respect to
  85   // the pause interval and b) we maintain the invariant that pause
  86   // time target < pause interval. If the user does not want this
  87   // maximum flexibility, they will have to set the pause interval
  88   // explicitly.
  89 
  90   // First make sure that, if either parameter is set, its value is
  91   // reasonable.
  92   if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
  93     if (MaxGCPauseMillis < 1) {
  94       vm_exit_during_initialization("MaxGCPauseMillis should be "
  95                                     "greater than 0");
  96     }
  97   }


 122   if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
 123     FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
 124   }
 125 
 126   // Finally, make sure that the two parameters are consistent.
 127   if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
 128     char buffer[256];
 129     jio_snprintf(buffer, 256,
 130                  "MaxGCPauseMillis (%u) should be less than "
 131                  "GCPauseIntervalMillis (%u)",
 132                  MaxGCPauseMillis, GCPauseIntervalMillis);
 133     vm_exit_during_initialization(buffer);
 134   }
 135 
 136   double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
 137   double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
 138   _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
 139 
 140   _tenuring_threshold = MaxTenuringThreshold;
 141 




 142 
 143   uintx reserve_perc = G1ReservePercent;
 144   // Put an artificial ceiling on this so that it's not set to a silly value.
 145   if (reserve_perc > 50) {
 146     reserve_perc = 50;
 147     warning("G1ReservePercent is set to a value that is too large, "
 148             "it's been updated to " UINTX_FORMAT, reserve_perc);
 149   }
 150   _reserve_factor = (double) reserve_perc / 100.0;
 151   // This will be set when the heap is expanded
 152   // for the first time during initialization.
 153   _reserve_regions = 0;
 154 
 155   _ihop_control = create_ihop_control();
 156 }
 157 
 158 G1CollectorPolicy::~G1CollectorPolicy() {
 159   delete _ihop_control;
 160 }
 161 


1059   size_t rs_length = hr->rem_set()->occupied();
1060   // Predicting the number of cards is based on which type of GC
1061   // we're predicting for.
1062   size_t card_num = _analytics->predict_card_num(rs_length, for_young_gc);
1063   size_t bytes_to_copy = predict_bytes_to_copy(hr);
1064 
1065   double region_elapsed_time_ms =
1066     _analytics->predict_rs_scan_time_ms(card_num, collector_state()->gcs_are_young()) +
1067     _analytics->predict_object_copy_time_ms(bytes_to_copy, collector_state()->during_concurrent_mark());
1068 
1069   // The prediction of the "other" time for this region is based
1070   // upon the region type and NOT the GC type.
1071   if (hr->is_young()) {
1072     region_elapsed_time_ms += _analytics->predict_young_other_time_ms(1);
1073   } else {
1074     region_elapsed_time_ms += _analytics->predict_non_young_other_time_ms(1);
1075   }
1076   return region_elapsed_time_ms;
1077 }
1078 
1079 
1080 void G1CollectorPolicy::print_yg_surv_rate_info() const {
1081 #ifndef PRODUCT
1082   _short_lived_surv_rate_group->print_surv_rate_summary();
1083   // add this call for any other surv rate groups
1084 #endif // PRODUCT
1085 }
1086 
1087 bool G1CollectorPolicy::is_young_list_full() const {
1088   uint young_list_length = _g1->young_list()->length();
1089   uint young_list_target_length = _young_list_target_length;
1090   return young_list_length >= young_list_target_length;
1091 }
1092 
1093 bool G1CollectorPolicy::can_expand_young_list() const {
1094   uint young_list_length = _g1->young_list()->length();
1095   uint young_list_max_length = _young_list_max_length;
1096   return young_list_length < young_list_max_length;
1097 }
1098 

