
src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp

rev 59304 : [mq]: 8245087-kim-sjohanss-review

*** 48,103 ****
    _ratio_over_threshold_count = 0;
    _ratio_over_threshold_sum = 0.0;
    _pauses_since_start = 0;
  }

  size_t G1HeapSizingPolicy::expansion_amount() {
!   assert(GCTimeRatio > 0,
!          "we should have set it to a default value set_g1_gc_flags() "
!          "if a user set it to 0");
!   double long_term_gc_overhead = _analytics->long_term_pause_time_ratio() * 100.0;
!   double short_term_gc_overhead = _analytics->short_term_pause_time_ratio() * 100.0;
    size_t expand_bytes = 0;

    if (_g1h->capacity() == _g1h->max_capacity()) {
!     log_trace(gc, ergo, heap)("Can not expand (heap already fully expanded) "
!                               "long term GC overhead: %1.2f %% committed: " SIZE_FORMAT "B",
!                               long_term_gc_overhead, _g1h->capacity());

      clear_ratio_check_data();
      return expand_bytes;
    }

!   const double gc_overhead_percent = 100.0 * (1.0 / (1.0 + GCTimeRatio));
!   double threshold = gc_overhead_percent;
!
!   // If the heap is at less than half its maximum size, scale the threshold down,
!   // to a limit of 1. Thus the smaller the heap is, the more likely it is to expand,
!   // though the scaling code will likely keep the increase small.
!   if (_g1h->capacity() <= _g1h->max_capacity() / 2) {
!     threshold *= (double)_g1h->capacity() / (double)(_g1h->max_capacity() / 2);
!     threshold = MAX2(threshold, 1.0);
!   }

    // If the last GC time ratio is over the threshold, increment the count of
    // times it has been exceeded, and add this ratio to the sum of exceeded
    // ratios.
!   if (short_term_gc_overhead > threshold) {
      _ratio_over_threshold_count++;
!     _ratio_over_threshold_sum += short_term_gc_overhead;
    }
-
    // Check if we've had enough GC time ratio checks that were over the
    // threshold to trigger an expansion. We'll also expand if we've
    // reached the end of the history buffer and the average of all entries
    // is still over the threshold. This indicates a smaller number of GCs were
    // long enough to make the average exceed the threshold.
    bool filled_history_buffer = _pauses_since_start == _num_prev_pauses_for_heuristics;

    if ((_ratio_over_threshold_count == MinOverThresholdForGrowth) ||
!       (filled_history_buffer && (long_term_gc_overhead > threshold))) {
      size_t min_expand_bytes = HeapRegion::GrainBytes;
      size_t reserved_bytes = _g1h->max_capacity();
      size_t committed_bytes = _g1h->capacity();
      size_t uncommitted_bytes = reserved_bytes - committed_bytes;
      size_t expand_bytes_via_pct =
--- 48,105 ----
    _ratio_over_threshold_count = 0;
    _ratio_over_threshold_sum = 0.0;
    _pauses_since_start = 0;
  }

+ double G1HeapSizingPolicy::scale_with_heap(double pause_time_threshold) {
+   double threshold = pause_time_threshold;
+   // If the heap is at less than half its maximum size, scale the threshold down,
+   // to a limit of 1%. Thus the smaller the heap is, the more likely it is to expand,
+   // though the scaling code will likely keep the increase small.
+   if (_g1h->capacity() <= _g1h->max_capacity() / 2) {
+     threshold *= (double)_g1h->capacity() / (double)(_g1h->max_capacity() / 2);
+     threshold = MAX2(threshold, 0.01);
+   }
+
+   return threshold;
+ }
+
  size_t G1HeapSizingPolicy::expansion_amount() {
!   assert(GCTimeRatio > 0, "must be");

!   double long_term_pause_time_ratio = _analytics->long_term_pause_time_ratio();
!   double short_term_pause_time_ratio = _analytics->short_term_pause_time_ratio();
    size_t expand_bytes = 0;

    if (_g1h->capacity() == _g1h->max_capacity()) {
!     log_trace(gc, ergo, heap)("Cannot expand (heap already fully expanded) "
!                               "long term GC overhead: %1.2f%% committed: " SIZE_FORMAT "B",
!                               long_term_pause_time_ratio * 100.0, _g1h->capacity());

      clear_ratio_check_data();
      return expand_bytes;
    }

!   const double pause_time_threshold = 1.0 / (1.0 + GCTimeRatio);
!   double threshold = scale_with_heap(pause_time_threshold);

    // If the last GC time ratio is over the threshold, increment the count of
    // times it has been exceeded, and add this ratio to the sum of exceeded
    // ratios.
!   if (short_term_pause_time_ratio > threshold) {
      _ratio_over_threshold_count++;
!     _ratio_over_threshold_sum += short_term_pause_time_ratio;
    }
    // Check if we've had enough GC time ratio checks that were over the
    // threshold to trigger an expansion. We'll also expand if we've
    // reached the end of the history buffer and the average of all entries
    // is still over the threshold. This indicates a smaller number of GCs were
    // long enough to make the average exceed the threshold.
    bool filled_history_buffer = _pauses_since_start == _num_prev_pauses_for_heuristics;

    if ((_ratio_over_threshold_count == MinOverThresholdForGrowth) ||
!       (filled_history_buffer && (long_term_pause_time_ratio > threshold))) {
      size_t min_expand_bytes = HeapRegion::GrainBytes;
      size_t reserved_bytes = _g1h->max_capacity();
      size_t committed_bytes = _g1h->capacity();
      size_t uncommitted_bytes = reserved_bytes - committed_bytes;
      size_t expand_bytes_via_pct =
*** 121,139 ****
      if (committed_bytes < InitialHeapSize / 4) {
        expand_bytes = (InitialHeapSize - committed_bytes) / 2;
      } else {
        double const MinScaleDownFactor = 0.2;
        double const MaxScaleUpFactor = 2;
!       double const StartScaleDownAt = gc_overhead_percent;
!       double const StartScaleUpAt = gc_overhead_percent * 1.5;
!       double const ScaleUpRange = gc_overhead_percent * 2.0;

        double ratio_delta;
        if (filled_history_buffer) {
!         ratio_delta = long_term_gc_overhead - threshold;
        } else {
!         ratio_delta = (_ratio_over_threshold_sum/_ratio_over_threshold_count) - threshold;
        }

        expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
        if (ratio_delta < StartScaleDownAt) {
          scale_factor = ratio_delta / StartScaleDownAt;
--- 123,141 ----
      if (committed_bytes < InitialHeapSize / 4) {
        expand_bytes = (InitialHeapSize - committed_bytes) / 2;
      } else {
        double const MinScaleDownFactor = 0.2;
        double const MaxScaleUpFactor = 2;
!       double const StartScaleDownAt = pause_time_threshold;
!       double const StartScaleUpAt = pause_time_threshold * 1.5;
!       double const ScaleUpRange = pause_time_threshold * 2.0;

        double ratio_delta;
        if (filled_history_buffer) {
!         ratio_delta = long_term_pause_time_ratio - threshold;
        } else {
!         ratio_delta = (_ratio_over_threshold_sum / _ratio_over_threshold_count) - threshold;
        }

        expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
        if (ratio_delta < StartScaleDownAt) {
          scale_factor = ratio_delta / StartScaleDownAt;
*** 143,154 ****
          scale_factor = MIN2(scale_factor, MaxScaleUpFactor);
        }
      }

      log_debug(gc, ergo, heap)("Attempt heap expansion (recent GC overhead higher than threshold after GC) "
!                               "long term GC overhead: %1.2f %% threshold: %1.2f %% uncommitted: " SIZE_FORMAT "B base expansion amount and scale: " SIZE_FORMAT "B (%1.2f%%)",
!                               long_term_gc_overhead, threshold, uncommitted_bytes, expand_bytes, scale_factor * 100);

      expand_bytes = static_cast<size_t>(expand_bytes * scale_factor);

      // Ensure the expansion size is at least the minimum growth amount
      // and at most the remaining uncommitted byte size.
--- 145,158 ----
          scale_factor = MIN2(scale_factor, MaxScaleUpFactor);
        }
      }

      log_debug(gc, ergo, heap)("Attempt heap expansion (recent GC overhead higher than threshold after GC) "
!                               "long term GC overhead: %1.2f%% threshold: %1.2f%% uncommitted: " SIZE_FORMAT "B "
!                               "base expansion amount and scale: " SIZE_FORMAT "B (%1.2f%%)",
!                               long_term_pause_time_ratio * 100.0, threshold * 100.0,
!                               uncommitted_bytes, expand_bytes, scale_factor * 100.0);

      expand_bytes = static_cast<size_t>(expand_bytes * scale_factor);

      // Ensure the expansion size is at least the minimum growth amount
      // and at most the remaining uncommitted byte size.
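
For reference, a minimal standalone sketch (not part of the patch) of how the new threshold is derived and scaled: it mirrors the patched logic of scale_with_heap() and the pause_time_threshold computation, assuming G1's ergonomic default GCTimeRatio of 12 and hypothetical example capacities (1 GB committed out of an 8 GB maximum), and substitutes std::max and printf for HotSpot's MAX2 and logging:

// Illustrative sketch only: reproduces the patched threshold logic outside HotSpot.
#include <algorithm>
#include <cstdio>

// Same scaling rule as the new G1HeapSizingPolicy::scale_with_heap():
// below half of the maximum heap size the threshold shrinks proportionally,
// but never below 1% (0.01).
static double scale_with_heap(double pause_time_threshold,
                              double capacity, double max_capacity) {
  double threshold = pause_time_threshold;
  if (capacity <= max_capacity / 2) {
    threshold *= capacity / (max_capacity / 2);
    threshold = std::max(threshold, 0.01);
  }
  return threshold;
}

int main() {
  const double GCTimeRatio = 12.0;  // G1's ergonomic default (assumed here)
  // GC overhead target as a ratio: 1 / (1 + 12) ~= 0.0769, i.e. ~7.7%.
  const double pause_time_threshold = 1.0 / (1.0 + GCTimeRatio);
  // Hypothetical example: 1 GB committed out of an 8 GB maximum heap.
  double threshold = scale_with_heap(pause_time_threshold, 1.0, 8.0);
  printf("pause time threshold: %.4f, scaled: %.4f\n",
         pause_time_threshold, threshold);  // ~0.0769 and ~0.0192
  return 0;
}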