Print this page
G1: Use SoftMaxHeapSize to guide GC heuristics
@@ -24,10 +24,11 @@
#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1HeapSizingPolicy.hpp"
#include "gc/g1/g1Analytics.hpp"
+#include "gc/g1/g1Policy.hpp"
#include "logging/log.hpp"
#include "runtime/globals.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
@@ -36,11 +37,12 @@
}
G1HeapSizingPolicy::G1HeapSizingPolicy(const G1CollectedHeap* g1h, const G1Analytics* analytics) :
_g1h(g1h),
_analytics(analytics),
- _num_prev_pauses_for_heuristics(analytics->number_of_recorded_pause_times()) {
+ _num_prev_pauses_for_heuristics(analytics->number_of_recorded_pause_times()),
+ _minimum_desired_bytes_after_last_cm(MinHeapSize) {
assert(MinOverThresholdForGrowth < _num_prev_pauses_for_heuristics, "Threshold must be less than %u", _num_prev_pauses_for_heuristics);
clear_ratio_check_data();
}
@@ -48,11 +50,11 @@
_ratio_over_threshold_count = 0;
_ratio_over_threshold_sum = 0.0;
_pauses_since_start = 0;
}
-size_t G1HeapSizingPolicy::expansion_amount() {
+size_t G1HeapSizingPolicy::expansion_amount_after_young_collection() {
double recent_gc_overhead = _analytics->recent_avg_pause_time_ratio() * 100.0;
double last_gc_overhead = _analytics->last_pause_time_ratio() * 100.0;
assert(GCTimeRatio > 0,
"we should have set it to a default value set_g1_gc_flags() "
"if a user set it to 0");
@@ -158,5 +160,54 @@
}
}
return expand_bytes;
}
+
+size_t G1HeapSizingPolicy::target_heap_capacity(size_t used_bytes, uintx free_ratio) {
+ const double free_percentage = (double) free_ratio / 100.0;
+ const double used_percentage = 1.0 - free_percentage;
+
+  // We have to be careful here as this calculation can overflow a
+  // 32-bit size_t.
+ double used_bytes_d = (double) used_bytes;
+ double desired_capacity_d = used_bytes_d / used_percentage;
+  // Make sure the desired capacity is under the max heap size, which
+  // by default will make it fit into a size_t.
+ double desired_capacity_upper_bound = (double) MaxHeapSize;
+ desired_capacity_d = MIN2(desired_capacity_d, desired_capacity_upper_bound);
+ // We can now safely turn it into size_t's.
+ return (size_t) desired_capacity_d;
+}
+
+size_t G1HeapSizingPolicy::expansion_amount_after_concurrent_mark() {
+ size_t cur_used_bytes = _g1h->non_young_capacity_bytes();
+
+ size_t minimum_desired_capacity = target_heap_capacity(cur_used_bytes, MinHeapFreeRatio);
+
+ _minimum_desired_bytes_after_last_cm = _g1h->policy()->minimum_desired_bytes_after_concurrent_mark(cur_used_bytes);
+  // Use the smaller of minimum_desired_capacity and the predicted
+  // minimum_desired_bytes_after_concurrent_mark: the prediction can be
+  // inflated by a burst of newly allocated humongous objects, so
+  // minimum_desired_capacity still acts as an upper bound on it.
+ _minimum_desired_bytes_after_last_cm = MIN2(_minimum_desired_bytes_after_last_cm, minimum_desired_capacity);
+
+ return _minimum_desired_bytes_after_last_cm > _g1h->capacity() ?
+ _minimum_desired_bytes_after_last_cm - _g1h->capacity() : 0;
+}
+
+size_t G1HeapSizingPolicy::shrink_amount_after_mixed_collections() {
+ size_t shrink_bytes = 0;
+ const size_t capacity_after_gc = _g1h->capacity();
+ const size_t used_after_gc = capacity_after_gc - _g1h->unused_committed_regions_in_bytes();
+ size_t maximum_desired_capacity = target_heap_capacity(used_after_gc, MaxHeapFreeRatio);
+  // soft_max_capacity may be smaller; honor it as an upper bound.
+ maximum_desired_capacity = MIN2(maximum_desired_capacity, _g1h->soft_max_capacity());
+  // Never target a capacity below _minimum_desired_bytes_after_last_cm.
+ maximum_desired_capacity = MAX2(maximum_desired_capacity, _minimum_desired_bytes_after_last_cm);
+
+ if (capacity_after_gc > maximum_desired_capacity) {
+ shrink_bytes = capacity_after_gc - maximum_desired_capacity;
+ }
+
+ return shrink_bytes;
+}