< prev index next >

src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp

Print this page
rev 47452 : [mq]: 8189729-erikd-review


  34 G1HeapSizingPolicy::G1HeapSizingPolicy(const G1CollectedHeap* g1, const G1Analytics* analytics) :
  35       _g1(g1),
  36       _analytics(analytics),
  37       _num_prev_pauses_for_heuristics(analytics->number_of_recorded_pause_times()) {
         // Cache how many pause times the analytics module records; the
         // over-threshold sample count required for growth must fit within
         // that recorded history, then reset the ratio-check bookkeeping.
  38     assert(MinOverThresholdForGrowth < _num_prev_pauses_for_heuristics, "Threshold must be less than %u", _num_prev_pauses_for_heuristics);
  39     clear_ratio_check_data();
  40   }
  41 
  42 void G1HeapSizingPolicy::clear_ratio_check_data() {
       // Reset the ratio-check bookkeeping: the count and running sum of
       // GC time ratios seen above the threshold, and the number of pauses
       // observed since tracking (re)started.
  43   _ratio_over_threshold_count = 0;
  44   _ratio_over_threshold_sum = 0.0;
  45   _pauses_since_start = 0;
  46 }
  47 
  48 size_t G1HeapSizingPolicy::expansion_amount() {
       // Express the recorded pause-time ratios as percentages of total time.
  49   double recent_gc_overhead = _analytics->recent_avg_pause_time_ratio() * 100.0;
  50   double last_gc_overhead = _analytics->last_pause_time_ratio() * 100.0;
  51   assert(GCTimeRatio > 0,
  52          "we should have set it to a default value set_g1_gc_flags() "
  53          "if a user set it to 0");
       // Target maximum GC overhead (in percent) implied by GCTimeRatio,
       // e.g. GCTimeRatio=9 allows up to 100 * 1/(1+9) = 10%.
  54   const double gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
  55 
  56   double threshold = gc_overhead_perc;
  57   size_t expand_bytes = 0;
  58 
  59   // If the heap is at less than half its maximum size, scale the threshold down,
  60   // to a limit of 1. Thus the smaller the heap is, the more likely it is to expand,
  61   // though the scaling code will likely keep the increase small.
  62   if (_g1->capacity() <= _g1->max_capacity() / 2) {
  63     threshold *= (double)_g1->capacity() / (double)(_g1->max_capacity() / 2);
         // MAX2 enforces the lower limit of 1 mentioned above.
  64     threshold = MAX2(threshold, 1.0);
  65   }
  66 
  67   // If the last GC time ratio is over the threshold, increment the count of
  68   // times it has been exceeded, and add this ratio to the sum of exceeded
  69   // ratios.
  70   if (last_gc_overhead > threshold) {
  71     _ratio_over_threshold_count++;
  72     _ratio_over_threshold_sum += last_gc_overhead;
  73   }
  74 
  75   // Check if we've had enough GC time ratio checks that were over the
  76   // threshold to trigger an expansion. We'll also expand if we've


  90 
         // NOTE(review): this webrev hunk starts mid-function;
         // committed_bytes, filled_history_buffer, expand_bytes_via_pct and
         // scale_factor are declared in the elided lines above this hunk.
  91     // If the current size is less than 1/4 of the Initial heap size, expand
  92     // by half of the delta between the current and Initial sizes. I.e., grow
  93     // back quickly.
  94     //
  95     // Otherwise, take the current size, or G1ExpandByPercentOfAvailable % of
  96     // the available expansion space, whichever is smaller, as the base
  97     // expansion size. Then possibly scale this size according to how much the
  98     // threshold has (on average) been exceeded by. If the delta is small
  99     // (less than the StartScaleDownAt value), scale the size down linearly, but
 100     // not by less than MinScaleDownFactor. If the delta is large (greater than
 101     // the StartScaleUpAt value), scale up, but adding no more than MaxScaleUpFactor
 102     // times the base size. The scaling will be linear in the range from
 103     // StartScaleUpAt to (StartScaleUpAt + ScaleUpRange). In other words,
 104     // ScaleUpRange sets the rate of scaling up.
 105     if (committed_bytes < InitialHeapSize / 4) {
 106       expand_bytes = (InitialHeapSize - committed_bytes) / 2;
 107     } else {
 108       double const MinScaleDownFactor = 0.2;
 109       double const MaxScaleUpFactor = 2;
 110       double const StartScaleDownAt = gc_overhead_perc;
 111       double const StartScaleUpAt = gc_overhead_perc * 1.5;
 112       double const ScaleUpRange = gc_overhead_perc * 2.0;
 113 
           // How far GC overhead is above the threshold: use the long-term
           // average once the history buffer has filled, otherwise the
           // average of only the samples that exceeded the threshold.
 114       double ratio_delta;
 115       if (filled_history_buffer) {
 116         ratio_delta = recent_gc_overhead - threshold;
 117       } else {
 118         ratio_delta = (_ratio_over_threshold_sum/_ratio_over_threshold_count) - threshold;
 119       }
 120 
           // Base expansion is capped at the currently committed size.
 121       expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
 122       if (ratio_delta < StartScaleDownAt) {
 123         scale_factor = ratio_delta / StartScaleDownAt;
 124         scale_factor = MAX2(scale_factor, MinScaleDownFactor);
 125       } else if (ratio_delta > StartScaleUpAt) {
 126         scale_factor = 1 + ((ratio_delta - StartScaleUpAt) / ScaleUpRange);
 127         scale_factor = MIN2(scale_factor, MaxScaleUpFactor);
 128       }
 129     }
 130 
 131     log_debug(gc, ergo, heap)("Attempt heap expansion (recent GC overhead higher than threshold after GC) "
 132                               "recent GC overhead: %1.2f %% threshold: %1.2f %% uncommitted: " SIZE_FORMAT "B base expansion amount and scale: " SIZE_FORMAT "B (%1.2f%%)",




  34 G1HeapSizingPolicy::G1HeapSizingPolicy(const G1CollectedHeap* g1, const G1Analytics* analytics) :
  35       _g1(g1),
  36       _analytics(analytics),
  37       _num_prev_pauses_for_heuristics(analytics->number_of_recorded_pause_times()) {
         // Cache how many pause times the analytics module records; the
         // over-threshold sample count required for growth must fit within
         // that recorded history, then reset the ratio-check bookkeeping.
  38     assert(MinOverThresholdForGrowth < _num_prev_pauses_for_heuristics, "Threshold must be less than %u", _num_prev_pauses_for_heuristics);
  39     clear_ratio_check_data();
  40   }
  41 
  42 void G1HeapSizingPolicy::clear_ratio_check_data() {
       // Reset the ratio-check bookkeeping: the count and running sum of
       // GC time ratios seen above the threshold, and the number of pauses
       // observed since tracking (re)started.
  43   _ratio_over_threshold_count = 0;
  44   _ratio_over_threshold_sum = 0.0;
  45   _pauses_since_start = 0;
  46 }
  47 
  48 size_t G1HeapSizingPolicy::expansion_amount() {
       // Express the recorded pause-time ratios as percentages of total time.
  49   double recent_gc_overhead = _analytics->recent_avg_pause_time_ratio() * 100.0;
  50   double last_gc_overhead = _analytics->last_pause_time_ratio() * 100.0;
  51   assert(GCTimeRatio > 0,
  52          "we should have set it to a default value set_g1_gc_flags() "
  53          "if a user set it to 0");
       // Target maximum GC overhead (in percent) implied by GCTimeRatio,
       // e.g. GCTimeRatio=9 allows up to 100 * 1/(1+9) = 10%.
  54   const double gc_overhead_percent = 100.0 * (1.0 / (1.0 + GCTimeRatio));
  55 
  56   double threshold = gc_overhead_percent;
  57   size_t expand_bytes = 0;
  58 
  59   // If the heap is at less than half its maximum size, scale the threshold down,
  60   // to a limit of 1. Thus the smaller the heap is, the more likely it is to expand,
  61   // though the scaling code will likely keep the increase small.
  62   if (_g1->capacity() <= _g1->max_capacity() / 2) {
  63     threshold *= (double)_g1->capacity() / (double)(_g1->max_capacity() / 2);
         // MAX2 enforces the lower limit of 1 mentioned above.
  64     threshold = MAX2(threshold, 1.0);
  65   }
  66 
  67   // If the last GC time ratio is over the threshold, increment the count of
  68   // times it has been exceeded, and add this ratio to the sum of exceeded
  69   // ratios.
  70   if (last_gc_overhead > threshold) {
  71     _ratio_over_threshold_count++;
  72     _ratio_over_threshold_sum += last_gc_overhead;
  73   }
  74 
  75   // Check if we've had enough GC time ratio checks that were over the
  76   // threshold to trigger an expansion. We'll also expand if we've


  90 
         // NOTE(review): this webrev hunk starts mid-function;
         // committed_bytes, filled_history_buffer, expand_bytes_via_pct and
         // scale_factor are declared in the elided lines above this hunk.
  91     // If the current size is less than 1/4 of the Initial heap size, expand
  92     // by half of the delta between the current and Initial sizes. I.e., grow
  93     // back quickly.
  94     //
  95     // Otherwise, take the current size, or G1ExpandByPercentOfAvailable % of
  96     // the available expansion space, whichever is smaller, as the base
  97     // expansion size. Then possibly scale this size according to how much the
  98     // threshold has (on average) been exceeded by. If the delta is small
  99     // (less than the StartScaleDownAt value), scale the size down linearly, but
 100     // not by less than MinScaleDownFactor. If the delta is large (greater than
 101     // the StartScaleUpAt value), scale up, but adding no more than MaxScaleUpFactor
 102     // times the base size. The scaling will be linear in the range from
 103     // StartScaleUpAt to (StartScaleUpAt + ScaleUpRange). In other words,
 104     // ScaleUpRange sets the rate of scaling up.
 105     if (committed_bytes < InitialHeapSize / 4) {
 106       expand_bytes = (InitialHeapSize - committed_bytes) / 2;
 107     } else {
 108       double const MinScaleDownFactor = 0.2;
 109       double const MaxScaleUpFactor = 2;
 110       double const StartScaleDownAt = gc_overhead_percent;
 111       double const StartScaleUpAt = gc_overhead_percent * 1.5;
 112       double const ScaleUpRange = gc_overhead_percent * 2.0;
 113 
           // How far GC overhead is above the threshold: use the long-term
           // average once the history buffer has filled, otherwise the
           // average of only the samples that exceeded the threshold.
 114       double ratio_delta;
 115       if (filled_history_buffer) {
 116         ratio_delta = recent_gc_overhead - threshold;
 117       } else {
 118         ratio_delta = (_ratio_over_threshold_sum/_ratio_over_threshold_count) - threshold;
 119       }
 120 
           // Base expansion is capped at the currently committed size.
 121       expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
 122       if (ratio_delta < StartScaleDownAt) {
 123         scale_factor = ratio_delta / StartScaleDownAt;
 124         scale_factor = MAX2(scale_factor, MinScaleDownFactor);
 125       } else if (ratio_delta > StartScaleUpAt) {
 126         scale_factor = 1 + ((ratio_delta - StartScaleUpAt) / ScaleUpRange);
 127         scale_factor = MIN2(scale_factor, MaxScaleUpFactor);
 128       }
 129     }
 130 
 131     log_debug(gc, ergo, heap)("Attempt heap expansion (recent GC overhead higher than threshold after GC) "
 132                               "recent GC overhead: %1.2f %% threshold: %1.2f %% uncommitted: " SIZE_FORMAT "B base expansion amount and scale: " SIZE_FORMAT "B (%1.2f%%)",


< prev index next >