--- old/src/hotspot/share/gc/g1/g1Analytics.cpp	2020-05-18 10:29:16.219435250 +0200
+++ new/src/hotspot/share/gc/g1/g1Analytics.cpp	2020-05-18 10:29:16.131433384 +0200
@@ -149,18 +149,14 @@
   _alloc_rate_ms_seq->add(alloc_rate);
 }
 
-void G1Analytics::compute_pause_time_ratio(double interval_ms, double pause_time_ms) {
-  _long_term_pause_time_ratio = _recent_gc_times_ms->sum() / interval_ms;
-  // Filter out nonsensical results due to bad input.
+void G1Analytics::compute_pause_time_ratios(double end_time_sec, double pause_time_ms) {
+  double long_interval_ms = (end_time_sec - oldest_known_gc_end_time_sec()) * 1000.0;
+  _long_term_pause_time_ratio = _recent_gc_times_ms->sum() / long_interval_ms;
   _long_term_pause_time_ratio = clamp(_long_term_pause_time_ratio, 0.0, 1.0);
 
-  // Compute the ratio of just this last pause time to the entire time range stored
-  // in the vectors. Comparing this pause to the entire range, rather than only the
-  // most recent interval, has the effect of smoothing over a possible transient 'burst'
-  // of more frequent pauses that don't really reflect a change in heap occupancy.
-  // This reduces the likelihood of a needless heap expansion being triggered.
-  _short_term_pause_time_ratio =
-    (pause_time_ms * _recent_prev_end_times_for_all_gcs_sec->num()) / interval_ms;
+  double short_interval_ms = (end_time_sec - most_recent_gc_end_time_sec()) * 1000.0;
+  _short_term_pause_time_ratio = pause_time_ms / short_interval_ms;
+  _short_term_pause_time_ratio = clamp(_short_term_pause_time_ratio, 0.0, 1.0);
 }
 
 void G1Analytics::report_concurrent_refine_rate_ms(double cards_per_ms) {
@@ -311,10 +307,14 @@
   return predict_size(_pending_cards_seq);
 }
 
-double G1Analytics::last_known_gc_end_time_sec() const {
+double G1Analytics::oldest_known_gc_end_time_sec() const {
   return _recent_prev_end_times_for_all_gcs_sec->oldest();
 }
 
+double G1Analytics::most_recent_gc_end_time_sec() const {
+  return _recent_prev_end_times_for_all_gcs_sec->last();
+}
+
 void G1Analytics::update_recent_gc_times(double end_time_sec,
                                          double pause_time_ms) {
   _recent_gc_times_ms->add(pause_time_ms);
--- old/src/hotspot/share/gc/g1/g1Analytics.hpp	2020-05-18 10:29:16.659444579 +0200
+++ new/src/hotspot/share/gc/g1/g1Analytics.hpp	2020-05-18 10:29:16.579442883 +0200
@@ -88,6 +88,9 @@
   size_t predict_size(TruncatedSeq const* seq) const;
   double predict_zero_bounded(TruncatedSeq const* seq) const;
 
+  double oldest_known_gc_end_time_sec() const;
+  double most_recent_gc_end_time_sec() const;
+
 public:
   G1Analytics(const G1Predictions* predictor);
 
@@ -160,9 +163,7 @@
   // Add a new GC of the given duration and end time to the record.
   void update_recent_gc_times(double end_time_sec, double elapsed_ms);
 
-  void compute_pause_time_ratio(double interval_ms, double pause_time_ms);
-
-  double last_known_gc_end_time_sec() const;
+  void compute_pause_time_ratios(double end_time_sec, double pause_time_ms);
 };
 
 #endif // SHARE_GC_G1_G1ANALYTICS_HPP
--- old/src/hotspot/share/gc/g1/g1Policy.cpp	2020-05-18 10:29:17.083453568 +0200
+++ new/src/hotspot/share/gc/g1/g1Policy.cpp	2020-05-18 10:29:17.007451960 +0200
@@ -118,6 +118,9 @@
   // We may immediately start allocating regions and placing them on the
   // collection set list. Initialize the per-collection set info
   _collection_set->start_incremental_building();
+
+  double now = os::elapsedTime();
+  _analytics->update_recent_gc_times(now, 0.0);
 }
 
 void G1Policy::note_gc_start() {
@@ -669,10 +672,8 @@
     double alloc_rate_ms = (double) regions_allocated / app_time_ms;
     _analytics->report_alloc_rate_ms(alloc_rate_ms);
 
-    double interval_ms =
-      (end_time_sec - _analytics->last_known_gc_end_time_sec()) * 1000.0;
+    _analytics->compute_pause_time_ratios(end_time_sec, pause_time_ms);
     _analytics->update_recent_gc_times(end_time_sec, pause_time_ms);
-    _analytics->compute_pause_time_ratio(interval_ms, pause_time_ms);
   }
 
   if (collector_state()->in_young_gc_before_mixed()) {