# HG changeset patch
# User mgerdin
# Date 1457439450 -3600
#      Tue Mar 08 13:17:30 2016 +0100
# Node ID 2861183ab82e4fdca18a84f06a2da28d4811402f
# Parent  7131b00438998b8de6654c91a4d622a622303ea8
8151711: Move G1 number sequences out of the G1 collector policy
Summary: Extract the TruncatedSeq measurement sequences and the predictions based on them from G1CollectorPolicy into a new G1Measurements class
Reviewed-by:

diff --git a/src/share/vm/gc/g1/concurrentMarkThread.cpp b/src/share/vm/gc/g1/concurrentMarkThread.cpp
--- a/src/share/vm/gc/g1/concurrentMarkThread.cpp
+++ b/src/share/vm/gc/g1/concurrentMarkThread.cpp
@@ -27,6 +27,7 @@
 #include "gc/g1/concurrentMarkThread.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
+#include "gc/g1/g1Measurements.hpp"
 #include "gc/g1/g1MMUTracker.hpp"
 #include "gc/g1/suspendibleThreadSet.hpp"
 #include "gc/g1/vm_operations_g1.hpp"
@@ -82,10 +83,11 @@

 // Marking pauses can be scheduled flexibly, so we might delay marking to meet MMU.
 void ConcurrentMarkThread::delay_to_keep_mmu(G1CollectorPolicy* g1_policy, bool remark) {
+  const G1Measurements* measurements = g1_policy->measurements();
   if (g1_policy->adaptive_young_list_length()) {
     double now = os::elapsedTime();
-    double prediction_ms = remark ? g1_policy->predict_remark_time_ms()
-                                  : g1_policy->predict_cleanup_time_ms();
+    double prediction_ms = remark ? measurements->predict_remark_time_ms()
+                                  : measurements->predict_cleanup_time_ms();
     G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker();
     jlong sleep_time_ms = mmu_tracker->when_ms(now, prediction_ms);
     os::sleep(this, sleep_time_ms, false);
diff --git a/src/share/vm/gc/g1/g1CollectorPolicy.cpp b/src/share/vm/gc/g1/g1CollectorPolicy.cpp
--- a/src/share/vm/gc/g1/g1CollectorPolicy.cpp
+++ b/src/share/vm/gc/g1/g1CollectorPolicy.cpp
@@ -31,6 +31,7 @@
 #include "gc/g1/g1ConcurrentMark.hpp"
 #include "gc/g1/g1IHOPControl.hpp"
 #include "gc/g1/g1GCPhaseTimes.hpp"
+#include "gc/g1/g1Measurements.hpp"
 #include "gc/g1/g1YoungGenSizer.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
@@ -41,85 +42,13 @@
 #include "utilities/debug.hpp"
 #include "utilities/pair.hpp"

-// Different defaults for different number of GC threads
-// They were chosen by running GCOld and SPECjbb on debris with different
-// numbers of GC threads and choosing them based on the results
-
-// all the same
-static double rs_length_diff_defaults[] = {
-  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
-};
-
-static double cost_per_card_ms_defaults[] = {
-  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
-};
-
-// all the same
-static double young_cards_per_entry_ratio_defaults[] = {
-  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
-};
-
-static double cost_per_entry_ms_defaults[] = {
-  0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
-};
-
-static double cost_per_byte_ms_defaults[] = {
-  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
-};
-
-// these should be pretty consistent
-static double constant_other_time_ms_defaults[] = {
-  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
-};
-
-
-static double young_other_cost_per_region_ms_defaults[] = {
-  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
-};
-
-static double non_young_other_cost_per_region_ms_defaults[] = {
-  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
-};
-
 G1CollectorPolicy::G1CollectorPolicy() :
   _predictor(G1ConfidencePercent / 100.0),
-
-  _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
-
-  _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
-  _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
-
-  _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _prev_collection_pause_end_ms(0.0),
-  _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _cost_scan_hcc_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _non_young_other_cost_per_region_ms_seq(
-                                         new TruncatedSeq(TruncatedSeqLength)),
-
-  _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
-
+  _measurements(new G1Measurements(&_predictor)),
   _pause_time_target_ms((double) MaxGCPauseMillis),
-
-  _recent_prev_end_times_for_all_gcs_sec(
-                                         new TruncatedSeq(NumPrevPausesForHeuristics)),
-
-  _recent_avg_pause_time_ratio(0.0),
   _rs_lengths_prediction(0),
   _max_survivor_regions(0),
-
-  // add here any more surv rate groups
   _survivors_age_table(true),
-
   _gc_overhead_perc(0.0),

   _bytes_allocated_in_old_since_last_gc(0),
@@ -147,27 +76,10 @@
   HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
   HeapRegionRemSet::setup_remset_size();

-  _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
-  _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
   clear_ratio_check_data();

   _phase_times = new G1GCPhaseTimes(ParallelGCThreads);

-  int index = MIN2(ParallelGCThreads - 1, 7u);
-
-  _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
-  _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
-  _cost_scan_hcc_seq->add(0.0);
-  _young_cards_per_entry_ratio_seq->add(
-                                  young_cards_per_entry_ratio_defaults[index]);
-  _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
-  _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
-  _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
-  _young_other_cost_per_region_ms_seq->add(
-                               young_other_cost_per_region_ms_defaults[index]);
-  _non_young_other_cost_per_region_ms_seq->add(
-                           non_young_other_cost_per_region_ms_defaults[index]);
-
   // Below, we might need to calculate the pause time target based on
   // the pause interval. When we do so we are going to give G1 maximum
   // flexibility and allow it to do pauses when it needs to. So, we'll
@@ -228,9 +140,6 @@
   double time_slice = (double) GCPauseIntervalMillis / 1000.0;
   _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);

-  // start conservatively (around 50ms is about right)
-  _concurrent_mark_remark_times_ms->add(0.05);
-  _concurrent_mark_cleanup_times_ms->add(0.20);
   _tenuring_threshold = MaxTenuringThreshold;

   assert(GCTimeRatio > 0,
@@ -257,14 +166,6 @@
   delete _ihop_control;
 }

-double G1CollectorPolicy::get_new_prediction(TruncatedSeq const* seq) const {
-  return _predictor.get_new_prediction(seq);
-}
-
-size_t G1CollectorPolicy::get_new_size_prediction(TruncatedSeq const* seq) const {
-  return (size_t)get_new_prediction(seq);
-}
-
 void G1CollectorPolicy::initialize_alignments() {
   _space_alignment = HeapRegion::GrainBytes;
   size_t card_table_alignment = CardTableRS::ct_max_alignment_constraint();
@@ -339,8 +240,9 @@
   double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
   size_t bytes_to_copy =
     (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
-  double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
-  double young_other_time_ms = predict_young_other_time_ms(young_length);
+  double copy_time_ms = _measurements->predict_object_copy_time_ms(bytes_to_copy,
+                                                                   collector_state()->during_concurrent_mark());
+  double young_other_time_ms = _measurements->predict_young_other_time_ms(young_length);
   double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
   if (pause_time_ms > target_pause_time_ms) {
     // end condition 2: prediction is over the target pause time
@@ -384,10 +286,10 @@
                                                        uint base_min_length) const {
   uint desired_min_length = 0;
   if (adaptive_young_list_length()) {
-    if (_alloc_rate_ms_seq->num() > 3) {
+    if (_measurements->num_alloc_rate_ms() > 3) {
       double now_sec = os::elapsedTime();
       double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
-      double alloc_rate_ms = predict_alloc_rate_ms();
+      double alloc_rate_ms = _measurements->predict_alloc_rate_ms();
       desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
     } else {
       // otherwise we don't have enough info to make the prediction
@@ -406,7 +308,7 @@
 }

 uint G1CollectorPolicy::update_young_list_max_and_target_length() {
-  return update_young_list_max_and_target_length(predict_rs_lengths());
+  return update_young_list_max_and_target_length(_measurements->predict_rs_lengths());
 }

 uint G1CollectorPolicy::update_young_list_max_and_target_length(size_t rs_lengths) {
@@ -511,9 +413,9 @@

   double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
   double survivor_regions_evac_time = predict_survivor_regions_evac_time();
-  size_t pending_cards = get_new_size_prediction(_pending_cards_seq);
-  size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
-  size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
+  size_t pending_cards = _measurements->predict_pending_cards();
+  size_t adj_rs_lengths = rs_lengths + _measurements->predict_rs_length_diff();
+  size_t scanned_cards = _measurements->predict_card_num(adj_rs_lengths, /* gcs_are_young */ true);
   double base_time_ms =
     predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
     survivor_regions_evac_time;
@@ -613,7 +515,7 @@
 }

 void G1CollectorPolicy::update_rs_lengths_prediction() {
-  update_rs_lengths_prediction(predict_rs_lengths());
+  update_rs_lengths_prediction(_measurements->predict_rs_lengths());
 }

 void G1CollectorPolicy::update_rs_lengths_prediction(size_t prediction) {
@@ -681,7 +583,7 @@
   double full_gc_time_sec = end_sec - _full_collection_start_sec;
   double full_gc_time_ms = full_gc_time_sec * 1000.0;

-  update_recent_gc_times(end_sec, full_gc_time_ms);
+  _measurements->update_recent_gc_times(end_sec, full_gc_time_ms);

   collector_state()->set_full_collection(false);
@@ -749,8 +651,8 @@
 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
   double end_time_sec = os::elapsedTime();
   double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
-  _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
-  _prev_collection_pause_end_ms += elapsed_time_ms;
+  _measurements->report_concurrent_mark_remark_times_ms(elapsed_time_ms);
+  _measurements->append_prev_collection_pause_end_ms(elapsed_time_ms);

   record_pause(Remark, _mark_remark_start_sec, end_time_sec);
 }
@@ -849,7 +751,7 @@
     maybe_start_marking();
   }

-  double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
+  double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _measurements->prev_collection_pause_end_ms());
   if (app_time_ms < MIN_TIMER_GRANULARITY) {
     // This usually happens due to the timer not having the required
     // granularity. Some Linuxes are the usual culprits.
@@ -868,31 +770,9 @@
     // place we can safely ignore them here.
     uint regions_allocated = _collection_set->eden_region_length();
     double alloc_rate_ms = (double) regions_allocated / app_time_ms;
-    _alloc_rate_ms_seq->add(alloc_rate_ms);
+    _measurements->report_alloc_rate_ms(alloc_rate_ms);

-    double interval_ms =
-      (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
-    update_recent_gc_times(end_time_sec, pause_time_ms);
-    _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
-    if (recent_avg_pause_time_ratio() < 0.0 ||
-        (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
-      // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
-      // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
-      if (_recent_avg_pause_time_ratio < 0.0) {
-        _recent_avg_pause_time_ratio = 0.0;
-      } else {
-        assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
-        _recent_avg_pause_time_ratio = 1.0;
-      }
-    }
-
-    // Compute the ratio of just this last pause time to the entire time range stored
-    // in the vectors. Comparing this pause to the entire range, rather than only the
-    // most recent interval, has the effect of smoothing over a possible transient 'burst'
-    // of more frequent pauses that don't really reflect a change in heap occupancy.
-    // This reduces the likelihood of a needless heap expansion being triggered.
-    _last_pause_time_ratio =
-      (pause_time_ms * _recent_prev_end_times_for_all_gcs_sec->num()) / interval_ms;
+    _measurements->update_recent_gc_times(end_time_sec, pause_time_ms);
   }

   bool new_in_marking_window = collector_state()->in_marking_window();
@@ -938,28 +818,20 @@
     double cost_per_card_ms = 0.0;
     if (_pending_cards > 0) {
       cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms) / (double) _pending_cards;
-      _cost_per_card_ms_seq->add(cost_per_card_ms);
+      _measurements->report_cost_per_card_ms(cost_per_card_ms);
     }

-    _cost_scan_hcc_seq->add(scan_hcc_time_ms);
+    _measurements->report_cost_scan_hcc(scan_hcc_time_ms);

     double cost_per_entry_ms = 0.0;
     if (cards_scanned > 10) {
       cost_per_entry_ms = average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned;
-      if (collector_state()->last_gc_was_young()) {
-        _cost_per_entry_ms_seq->add(cost_per_entry_ms);
-      } else {
-        _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
-      }
+      _measurements->report_cost_per_entry_ms(cost_per_entry_ms, collector_state()->last_gc_was_young());
     }

     if (_max_rs_lengths > 0) {
       double cards_per_entry_ratio =
         (double) cards_scanned / (double) _max_rs_lengths;
-      if (collector_state()->last_gc_was_young()) {
-        _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
-      } else {
-        _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
-      }
+      _measurements->report_cards_per_entry_ratio(cards_per_entry_ratio, collector_state()->last_gc_was_young());
     }

     // This is defensive. For a while _max_rs_lengths could get
@@ -980,7 +852,7 @@
     if (_max_rs_lengths > recorded_rs_lengths) {
       rs_length_diff = _max_rs_lengths - recorded_rs_lengths;
     }
-    _rs_length_diff_seq->add((double) rs_length_diff);
+    _measurements->report_rs_length_diff((double) rs_length_diff);

     size_t freed_bytes = heap_used_bytes_before_gc - cur_used_bytes;
     size_t copied_bytes = _collection_set->bytes_used_before() - freed_bytes;
@@ -988,27 +860,23 @@

     if (copied_bytes > 0) {
       cost_per_byte_ms = average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
-      if (collector_state()->in_marking_window()) {
-        _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
-      } else {
-        _cost_per_byte_ms_seq->add(cost_per_byte_ms);
-      }
+      _measurements->report_cost_per_byte_ms(cost_per_byte_ms, collector_state()->in_marking_window());
     }

     if (_collection_set->young_region_length() > 0) {
-      _young_other_cost_per_region_ms_seq->add(young_other_time_ms() /
-                                               _collection_set->young_region_length());
+      _measurements->report_young_other_cost_per_region_ms(young_other_time_ms() /
+                                                           _collection_set->young_region_length());
     }

     if (_collection_set->old_region_length() > 0) {
-      _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms() /
-                                                   _collection_set->old_region_length());
+      _measurements->report_non_young_other_cost_per_region_ms(non_young_other_time_ms() /
+                                                               _collection_set->old_region_length());
     }

-    _constant_other_time_ms_seq->add(constant_other_time_ms(pause_time_ms));
+    _measurements->report_constant_other_time_ms(constant_other_time_ms(pause_time_ms));

-    _pending_cards_seq->add((double) _pending_cards);
-    _rs_lengths_seq->add((double) _max_rs_lengths);
+    _measurements->report_pending_cards((double) _pending_cards);
+    _measurements->report_rs_lengths((double) _max_rs_lengths);
   }

   collector_state()->set_in_marking_window(new_in_marking_window);
@@ -1145,106 +1013,10 @@
   dcqs.notify_if_necessary();
 }

-size_t G1CollectorPolicy::predict_rs_lengths() const {
-  return get_new_size_prediction(_rs_lengths_seq);
-}
-
-size_t G1CollectorPolicy::predict_rs_length_diff() const {
-  return get_new_size_prediction(_rs_length_diff_seq);
-}
-
-double G1CollectorPolicy::predict_alloc_rate_ms() const {
-  return get_new_prediction(_alloc_rate_ms_seq);
-}
-
-double G1CollectorPolicy::predict_cost_per_card_ms() const {
-  return get_new_prediction(_cost_per_card_ms_seq);
-}
-
-double G1CollectorPolicy::predict_scan_hcc_ms() const {
-  return get_new_prediction(_cost_scan_hcc_seq);
-}
-
-double G1CollectorPolicy::predict_rs_update_time_ms(size_t pending_cards) const {
-  return pending_cards * predict_cost_per_card_ms() + predict_scan_hcc_ms();
-}
-
-double G1CollectorPolicy::predict_young_cards_per_entry_ratio() const {
-  return get_new_prediction(_young_cards_per_entry_ratio_seq);
-}
-
-double G1CollectorPolicy::predict_mixed_cards_per_entry_ratio() const {
-  if (_mixed_cards_per_entry_ratio_seq->num() < 2) {
-    return predict_young_cards_per_entry_ratio();
-  } else {
-    return get_new_prediction(_mixed_cards_per_entry_ratio_seq);
-  }
-}
-
-size_t G1CollectorPolicy::predict_young_card_num(size_t rs_length) const {
-  return (size_t) (rs_length * predict_young_cards_per_entry_ratio());
-}
-
-size_t G1CollectorPolicy::predict_non_young_card_num(size_t rs_length) const {
-  return (size_t)(rs_length * predict_mixed_cards_per_entry_ratio());
-}
-
-double G1CollectorPolicy::predict_rs_scan_time_ms(size_t card_num) const {
-  if (collector_state()->gcs_are_young()) {
-    return card_num * get_new_prediction(_cost_per_entry_ms_seq);
-  } else {
-    return predict_mixed_rs_scan_time_ms(card_num);
-  }
-}
-
-double G1CollectorPolicy::predict_mixed_rs_scan_time_ms(size_t card_num) const {
-  if (_mixed_cost_per_entry_ms_seq->num() < 3) {
-    return card_num * get_new_prediction(_cost_per_entry_ms_seq);
-  } else {
-    return card_num * get_new_prediction(_mixed_cost_per_entry_ms_seq);
-  }
-}
-
-double G1CollectorPolicy::predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const {
-  if (_cost_per_byte_ms_during_cm_seq->num() < 3) {
-    return (1.1 * bytes_to_copy) * get_new_prediction(_cost_per_byte_ms_seq);
-  } else {
-    return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_during_cm_seq);
-  }
-}
-
-double G1CollectorPolicy::predict_object_copy_time_ms(size_t bytes_to_copy) const {
-  if (collector_state()->during_concurrent_mark()) {
-    return predict_object_copy_time_ms_during_cm(bytes_to_copy);
-  } else {
-    return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_seq);
-  }
-}
-
-double G1CollectorPolicy::predict_constant_other_time_ms() const {
-  return get_new_prediction(_constant_other_time_ms_seq);
-}
-
-double G1CollectorPolicy::predict_young_other_time_ms(size_t young_num) const {
-  return young_num * get_new_prediction(_young_other_cost_per_region_ms_seq);
-}
-
-double G1CollectorPolicy::predict_non_young_other_time_ms(size_t non_young_num) const {
-  return non_young_num * get_new_prediction(_non_young_other_cost_per_region_ms_seq);
-}
-
-double G1CollectorPolicy::predict_remark_time_ms() const {
-  return get_new_prediction(_concurrent_mark_remark_times_ms);
-}
-
-double G1CollectorPolicy::predict_cleanup_time_ms() const {
-  return get_new_prediction(_concurrent_mark_cleanup_times_ms);
-}
-
 double G1CollectorPolicy::predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const {
   TruncatedSeq* seq = surv_rate_group->get_seq(age);
   guarantee(seq->num() > 0, "There should be some young gen survivor samples available. Tried to access with age %d", age);
-  double pred = get_new_prediction(seq);
+  double pred = _predictor.get_new_prediction(seq);
   if (pred > 1.0) {
     pred = 1.0;
   }
@@ -1262,19 +1034,14 @@
 double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
                                                        size_t scanned_cards) const {
   return
-    predict_rs_update_time_ms(pending_cards) +
-    predict_rs_scan_time_ms(scanned_cards) +
-    predict_constant_other_time_ms();
+    _measurements->predict_rs_update_time_ms(pending_cards) +
+    _measurements->predict_rs_scan_time_ms(scanned_cards, collector_state()->gcs_are_young()) +
+    _measurements->predict_constant_other_time_ms();
 }

 double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) const {
-  size_t rs_length = predict_rs_lengths() + predict_rs_length_diff();
-  size_t card_num;
-  if (collector_state()->gcs_are_young()) {
-    card_num = predict_young_card_num(rs_length);
-  } else {
-    card_num = predict_non_young_card_num(rs_length);
-  }
+  size_t rs_length = _measurements->predict_rs_lengths() + _measurements->predict_rs_length_diff();
+  size_t card_num = _measurements->predict_card_num(rs_length, collector_state()->gcs_are_young());
   return predict_base_elapsed_time_ms(pending_cards, card_num);
 }
@@ -1294,38 +1061,25 @@
 double G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
                                                          bool for_young_gc) const {
   size_t rs_length = hr->rem_set()->occupied();
-  size_t card_num;
-
   // Predicting the number of cards is based on which type of GC
   // we're predicting for.
-  if (for_young_gc) {
-    card_num = predict_young_card_num(rs_length);
-  } else {
-    card_num = predict_non_young_card_num(rs_length);
-  }
+  size_t card_num = _measurements->predict_card_num(rs_length, for_young_gc);
   size_t bytes_to_copy = predict_bytes_to_copy(hr);

   double region_elapsed_time_ms =
-    predict_rs_scan_time_ms(card_num) +
-    predict_object_copy_time_ms(bytes_to_copy);
+    _measurements->predict_rs_scan_time_ms(card_num, collector_state()->gcs_are_young()) +
+    _measurements->predict_object_copy_time_ms(bytes_to_copy, collector_state()->during_concurrent_mark());

   // The prediction of the "other" time for this region is based
   // upon the region type and NOT the GC type.
   if (hr->is_young()) {
-    region_elapsed_time_ms += predict_young_other_time_ms(1);
+    region_elapsed_time_ms += _measurements->predict_young_other_time_ms(1);
   } else {
-    region_elapsed_time_ms += predict_non_young_other_time_ms(1);
+    region_elapsed_time_ms += _measurements->predict_non_young_other_time_ms(1);
   }
   return region_elapsed_time_ms;
 }

-void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
-                                               double elapsed_ms) {
-  _recent_gc_times_ms->add(elapsed_ms);
-  _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
-  _prev_collection_pause_end_ms = end_time_sec * 1000.0;
-}
-
 void G1CollectorPolicy::clear_ratio_check_data() {
   _ratio_over_threshold_count = 0;
   _ratio_over_threshold_sum = 0.0;
@@ -1333,8 +1087,8 @@
 }

 size_t G1CollectorPolicy::expansion_amount() {
-  double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
-  double last_gc_overhead = _last_pause_time_ratio * 100.0;
+  double recent_gc_overhead = _measurements->recent_avg_pause_time_ratio() * 100.0;
+  double last_gc_overhead = _measurements->last_pause_time_ratio() * 100.0;
   double threshold = _gc_overhead_perc;
   size_t expand_bytes = 0;
@@ -1619,8 +1373,8 @@

   double end_sec = os::elapsedTime();
   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
-  _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
-  _prev_collection_pause_end_ms += elapsed_time_ms;
+  _measurements->report_concurrent_mark_cleanup_times_ms(elapsed_time_ms);
+  _measurements->append_prev_collection_pause_end_ms(elapsed_time_ms);

   record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
 }
@@ -1758,4 +1512,3 @@
   double time_remaining_ms = _collection_set->finalize_young_part(target_pause_time_ms);
   _collection_set->finalize_old_part(time_remaining_ms);
 }
-
diff --git a/src/share/vm/gc/g1/g1CollectorPolicy.hpp b/src/share/vm/gc/g1/g1CollectorPolicy.hpp
--- a/src/share/vm/gc/g1/g1CollectorPolicy.hpp
+++ b/src/share/vm/gc/g1/g1CollectorPolicy.hpp
@@ -43,6 +43,7 @@
 class G1CollectionSet;
 class CollectionSetChooser;
 class G1IHOPControl;
+class G1Measurements;
 class G1YoungGenSizer;

 class G1CollectorPolicy: public CollectorPolicy {
@@ -57,9 +58,7 @@
   void report_ihop_statistics();

   G1Predictions _predictor;
-
-  double get_new_prediction(TruncatedSeq const* seq) const;
-  size_t get_new_size_prediction(TruncatedSeq const* seq) const;
+  G1Measurements* _measurements;

   G1MMUTracker* _mmu_tracker;

@@ -68,12 +67,6 @@

   double _full_collection_start_sec;

-  // These exclude marking times.
-  TruncatedSeq* _recent_gc_times_ms;
-
-  TruncatedSeq* _concurrent_mark_remark_times_ms;
-  TruncatedSeq* _concurrent_mark_cleanup_times_ms;
-
   // Ratio check data for determining if heap growth is necessary.
   uint _ratio_over_threshold_count;
   double _ratio_over_threshold_sum;
@@ -88,7 +81,6 @@

   SurvRateGroup* _short_lived_surv_rate_group;
   SurvRateGroup* _survivor_surv_rate_group;
-  // add here any more surv rate groups

   double _gc_overhead_perc;

@@ -96,34 +88,12 @@
   uint _reserve_regions;

   enum PredictionConstants {
-    TruncatedSeqLength = 10,
     NumPrevPausesForHeuristics = 10,
     // MinOverThresholdForGrowth must be less than NumPrevPausesForHeuristics,
     // representing the minimum number of pause time ratios that exceed
     // GCTimeRatio before a heap expansion will be triggered.
    MinOverThresholdForGrowth = 4
  };

-  TruncatedSeq* _alloc_rate_ms_seq;
-  double        _prev_collection_pause_end_ms;
-
-  TruncatedSeq* _rs_length_diff_seq;
-  TruncatedSeq* _cost_per_card_ms_seq;
-  TruncatedSeq* _cost_scan_hcc_seq;
-  TruncatedSeq* _young_cards_per_entry_ratio_seq;
-  TruncatedSeq* _mixed_cards_per_entry_ratio_seq;
-  TruncatedSeq* _cost_per_entry_ms_seq;
-  TruncatedSeq* _mixed_cost_per_entry_ms_seq;
-  TruncatedSeq* _cost_per_byte_ms_seq;
-  TruncatedSeq* _constant_other_time_ms_seq;
-  TruncatedSeq* _young_other_cost_per_region_ms_seq;
-  TruncatedSeq* _non_young_other_cost_per_region_ms_seq;
-
-  TruncatedSeq* _pending_cards_seq;
-  TruncatedSeq* _rs_lengths_seq;
-
-  TruncatedSeq* _cost_per_byte_ms_during_cm_seq;
-
   G1YoungGenSizer* _young_gen_sizer;

   uint _free_regions_at_end_of_collection;
@@ -151,6 +121,7 @@
   G1InitialMarkToMixedTimeTracker _initial_mark_to_mixed;
 public:
   const G1Predictions& predictor() const { return _predictor; }
+  const G1Measurements* measurements() { return const_cast<const G1Measurements*>(_measurements); }

   // Add the given number of bytes to the total number of allocated bytes in the old gen.
   void add_bytes_allocated_in_old_since_last_gc(size_t bytes) { _bytes_allocated_in_old_since_last_gc += bytes; }
@@ -177,39 +148,6 @@
     _max_rs_lengths = rs_lengths;
   }

-  size_t predict_rs_lengths() const;
-
-  size_t predict_rs_length_diff() const;
-
-  double predict_alloc_rate_ms() const;
-
-  double predict_cost_per_card_ms() const;
-
-  double predict_scan_hcc_ms() const;
-
-  double predict_rs_update_time_ms(size_t pending_cards) const;
-
-  double predict_young_cards_per_entry_ratio() const;
-
-  double predict_mixed_cards_per_entry_ratio() const;
-
-  size_t predict_young_card_num(size_t rs_length) const;
-
-  size_t predict_non_young_card_num(size_t rs_length) const;
-
-  double predict_rs_scan_time_ms(size_t card_num) const;
-
-  double predict_mixed_rs_scan_time_ms(size_t card_num) const;
-
-  double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const;
-
-  double predict_object_copy_time_ms(size_t bytes_to_copy) const;
-
-  double predict_constant_other_time_ms() const;
-
-  double predict_young_other_time_ms(size_t young_num) const;
-
-  double predict_non_young_other_time_ms(size_t non_young_num) const;

   double predict_base_elapsed_time_ms(size_t pending_cards) const;
   double predict_base_elapsed_time_ms(size_t pending_cards,
@@ -242,10 +180,6 @@
     return _mmu_tracker->max_gc_time() * 1000.0;
   }

-  double predict_remark_time_ms() const;
-
-  double predict_cleanup_time_ms() const;
-
  // Returns an estimate of the survival rate of the region at yg-age
  // "yg_age".
  double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const;
@@ -265,11 +199,6 @@
   CollectionSetChooser* cset_chooser() const;
 private:
-  // Statistics kept per GC stoppage, pause or full.
-  TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;
-
-  // Add a new GC of the given duration and end time to the record.
-  void update_recent_gc_times(double end_time_sec, double elapsed_ms);

   // The number of bytes copied during the GC.
   size_t _bytes_copied_during_gc;
@@ -279,15 +208,6 @@

   G1GCPhaseTimes* _phase_times;

-  // The ratio of gc time to elapsed time, computed over recent pauses,
-  // and the ratio for just the last pause.
-  double _recent_avg_pause_time_ratio;
-  double _last_pause_time_ratio;
-
-  double recent_avg_pause_time_ratio() const {
-    return _recent_avg_pause_time_ratio;
-  }
-
   // This set of variables tracks the collector efficiency, in order to
   // determine whether we should initiate a new marking.
   double _mark_remark_start_sec;
@@ -491,7 +411,6 @@
   } else {
     _short_lived_surv_rate_group->finished_recalculating_age_indexes();
   }
-  // do that for any other surv rate groups
 }

 size_t young_list_target_length() const { return _young_list_target_length; }
diff --git a/src/share/vm/gc/g1/g1CollectorPolicy.cpp b/src/share/vm/gc/g1/g1Measurements.cpp
copy from src/share/vm/gc/g1/g1CollectorPolicy.cpp
copy to src/share/vm/gc/g1/g1Measurements.cpp
--- a/src/share/vm/gc/g1/g1CollectorPolicy.cpp
+++ b/src/share/vm/gc/g1/g1Measurements.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,23 +23,11 @@
  */

 #include "precompiled.hpp"
-#include "gc/g1/concurrentG1Refine.hpp"
-#include "gc/g1/concurrentMarkThread.inline.hpp"
-#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1CollectionSet.hpp"
-#include "gc/g1/g1CollectorPolicy.hpp"
-#include "gc/g1/g1ConcurrentMark.hpp"
-#include "gc/g1/g1IHOPControl.hpp"
-#include "gc/g1/g1GCPhaseTimes.hpp"
-#include "gc/g1/g1YoungGenSizer.hpp"
-#include "gc/g1/heapRegion.inline.hpp"
-#include "gc/g1/heapRegionRemSet.hpp"
-#include "gc/shared/gcPolicyCounters.hpp"
-#include "runtime/arguments.hpp"
-#include "runtime/java.hpp"
-#include "runtime/mutexLocker.hpp"
+#include "gc/g1/g1Measurements.hpp"
+#include "gc/g1/g1Predictions.hpp"
+#include "runtime/os.hpp"
 #include "utilities/debug.hpp"
-#include "utilities/pair.hpp"
+#include "utilities/numberSeq.hpp"

 // Different defaults for different number of GC threads
 // They were chosen by running GCOld and SPECjbb on debris with different
@@ -81,77 +69,32 @@
   1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
 };

-G1CollectorPolicy::G1CollectorPolicy() :
-  _predictor(G1ConfidencePercent / 100.0),
+G1Measurements::G1Measurements(G1Predictions* predictor) :
+  _predictor(predictor),
+  _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
+  _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
+  _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
+  _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
+  _prev_collection_pause_end_ms(0.0),
+  _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
+  _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
+  _cost_scan_hcc_seq(new TruncatedSeq(TruncatedSeqLength)),
+  _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
+  _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
+  _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
+  _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
+  _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
+  _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
+  _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
+  _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
+  _non_young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
+  _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
+  _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
+  _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)) {

-  _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
-
-  _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
-  _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
-
-  _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _prev_collection_pause_end_ms(0.0),
-  _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _cost_scan_hcc_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _non_young_other_cost_per_region_ms_seq(
-                                         new TruncatedSeq(TruncatedSeqLength)),
-
-  _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
-
-  _pause_time_target_ms((double) MaxGCPauseMillis),
-
-  _recent_prev_end_times_for_all_gcs_sec(
-                                         new TruncatedSeq(NumPrevPausesForHeuristics)),
-
-  _recent_avg_pause_time_ratio(0.0),
-  _rs_lengths_prediction(0),
-  _max_survivor_regions(0),
-
-  // add here any more surv rate groups
-  _survivors_age_table(true),
-
-  _gc_overhead_perc(0.0),
-
-  _bytes_allocated_in_old_since_last_gc(0),
-  _ihop_control(NULL),
-  _initial_mark_to_mixed() {
-
-  // SurvRateGroups below must be initialized after the predictor because they
-  // indirectly use it through this object passed to their constructor.
-  _short_lived_surv_rate_group =
-    new SurvRateGroup(&_predictor, "Short Lived", G1YoungSurvRateNumRegionsSummary);
-  _survivor_surv_rate_group =
-    new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary);
-
-  // Set up the region size and associated fields. Given that the
-  // policy is created before the heap, we have to set this up here,
-  // so it's done as soon as possible.
-
-  // It would have been natural to pass initial_heap_byte_size() and
-  // max_heap_byte_size() to setup_heap_region_size() but those have
-  // not been set up at this point since they should be aligned with
-  // the region size. So, there is a circular dependency here. We base
-  // the region size on the heap size, but the heap size should be
-  // aligned with the region size. To get around this we use the
-  // unaligned values for the heap.
-  HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
-  HeapRegionRemSet::setup_remset_size();
-
+  // Seed sequences with initial values.
   _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
   _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
-  clear_ratio_check_data();
-
-  _phase_times = new G1GCPhaseTimes(ParallelGCThreads);

   int index = MIN2(ParallelGCThreads - 1, 7u);
@@ -168,1012 +111,135 @@
   _non_young_other_cost_per_region_ms_seq->add(
                            non_young_other_cost_per_region_ms_defaults[index]);

-  // Below, we might need to calculate the pause time target based on
-  // the pause interval. When we do so we are going to give G1 maximum
-  // flexibility and allow it to do pauses when it needs to. So, we'll
-  // arrange that the pause interval to be pause time target + 1 to
-  // ensure that a) the pause time target is maximized with respect to
-  // the pause interval and b) we maintain the invariant that pause
-  // time target < pause interval. If the user does not want this
-  // maximum flexibility, they will have to set the pause interval
-  // explicitly.
+  // start conservatively (around 50ms is about right)
+  _concurrent_mark_remark_times_ms->add(0.05);
+  _concurrent_mark_cleanup_times_ms->add(0.20);
+}

-  // First make sure that, if either parameter is set, its value is
-  // reasonable.
-  if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
-    if (MaxGCPauseMillis < 1) {
-      vm_exit_during_initialization("MaxGCPauseMillis should be "
-                                    "greater than 0");
-    }
-  }
-  if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
-    if (GCPauseIntervalMillis < 1) {
-      vm_exit_during_initialization("GCPauseIntervalMillis should be "
-                                    "greater than 0");
+double G1Measurements::get_new_prediction(TruncatedSeq const* seq) const {
+  return _predictor->get_new_prediction(seq);
+}
+
+size_t G1Measurements::get_new_size_prediction(TruncatedSeq const* seq) const {
+  return (size_t)get_new_prediction(seq);
+}
+
+int G1Measurements::num_alloc_rate_ms() const {
+  return _alloc_rate_ms_seq->num();
+}
+
+void G1Measurements::report_concurrent_mark_remark_times_ms(double ms) {
+  _concurrent_mark_remark_times_ms->add(ms);
+}
+
+void G1Measurements::report_alloc_rate_ms(double alloc_rate) {
+  _alloc_rate_ms_seq->add(alloc_rate);
+}
+
+void G1Measurements::compute_pause_time_ratio(double interval_ms, double pause_time_ms) {
+  _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum() / interval_ms;
+  if (_recent_avg_pause_time_ratio < 0.0 ||
+      (_recent_avg_pause_time_ratio - 1.0 > 0.0)) {
+    // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
+    // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
+    if (_recent_avg_pause_time_ratio < 0.0) {
+      _recent_avg_pause_time_ratio = 0.0;
+    } else {
+      assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
+      _recent_avg_pause_time_ratio = 1.0;
     }
   }
-  // Then, if the pause time target parameter was not set, set it to
-  // the default value.
-  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
-    if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
-      // The default pause time target in G1 is 200ms
-      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
-    } else {
-      // We do not allow the pause interval to be set without the
-      // pause time target
-      vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
-                                    "without setting MaxGCPauseMillis");
-    }
-  }
-
-  // Then, if the interval parameter was not set, set it according to
-  // the pause time target (this will also deal with the case when the
-  // pause time target is the default value).
-  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
-    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
-  }
-
-  // Finally, make sure that the two parameters are consistent.
-  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
-    char buffer[256];
-    jio_snprintf(buffer, 256,
-                 "MaxGCPauseMillis (%u) should be less than "
-                 "GCPauseIntervalMillis (%u)",
-                 MaxGCPauseMillis, GCPauseIntervalMillis);
-    vm_exit_during_initialization(buffer);
-  }
-
-  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
-  double time_slice = (double) GCPauseIntervalMillis / 1000.0;
-  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
-
-  // start conservatively (around 50ms is about right)
-  _concurrent_mark_remark_times_ms->add(0.05);
-  _concurrent_mark_cleanup_times_ms->add(0.20);
-  _tenuring_threshold = MaxTenuringThreshold;
-
-  assert(GCTimeRatio > 0,
-         "we should have set it to a default value set_g1_gc_flags() "
-         "if a user set it to 0");
-  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
-
-  uintx reserve_perc = G1ReservePercent;
-  // Put an artificial ceiling on this so that it's not set to a silly value.
-  if (reserve_perc > 50) {
-    reserve_perc = 50;
-    warning("G1ReservePercent is set to a value that is too large, "
-            "it's been updated to " UINTX_FORMAT, reserve_perc);
-  }
-  _reserve_factor = (double) reserve_perc / 100.0;
-  // This will be set when the heap is expanded
-  // for the first time during initialization.
-  _reserve_regions = 0;
-
-  _ihop_control = create_ihop_control();
+  // Compute the ratio of just this last pause time to the entire time range stored
+  // in the vectors. Comparing this pause to the entire range, rather than only the
+  // most recent interval, has the effect of smoothing over a possible transient 'burst'
+  // of more frequent pauses that don't really reflect a change in heap occupancy.
+  // This reduces the likelihood of a needless heap expansion being triggered.
+  _last_pause_time_ratio =
+    (pause_time_ms * _recent_prev_end_times_for_all_gcs_sec->num()) / interval_ms;
 }

-G1CollectorPolicy::~G1CollectorPolicy() {
-  delete _ihop_control;
+void G1Measurements::report_cost_per_card_ms(double cost_per_card_ms) {
+  _cost_per_card_ms_seq->add(cost_per_card_ms);
 }

-double G1CollectorPolicy::get_new_prediction(TruncatedSeq const* seq) const {
-  return _predictor.get_new_prediction(seq);
+void G1Measurements::report_cost_scan_hcc(double cost_scan_hcc) {
+  _cost_scan_hcc_seq->add(cost_scan_hcc);
 }

-size_t G1CollectorPolicy::get_new_size_prediction(TruncatedSeq const* seq) const {
-  return (size_t)get_new_prediction(seq);
-}
-
-void G1CollectorPolicy::initialize_alignments() {
-  _space_alignment = HeapRegion::GrainBytes;
-  size_t card_table_alignment = CardTableRS::ct_max_alignment_constraint();
-  size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
-  _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size);
-}
-
-G1CollectorState* G1CollectorPolicy::collector_state() const { return _g1->collector_state(); }
-
-void G1CollectorPolicy::post_heap_initialize() {
-  uintx max_regions = G1CollectedHeap::heap()->max_regions();
-  size_t max_young_size = (size_t)_young_gen_sizer->max_young_length(max_regions) * HeapRegion::GrainBytes;
-  if (max_young_size != MaxNewSize) {
-    FLAG_SET_ERGO(size_t, MaxNewSize, max_young_size);
+void G1Measurements::report_cost_per_entry_ms(double cost_per_entry_ms, bool last_gc_was_young) {
+  if (last_gc_was_young) {
+    _cost_per_entry_ms_seq->add(cost_per_entry_ms);
+  } else {
+    _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
   }
 }

-void G1CollectorPolicy::initialize_flags() {
-  if (G1HeapRegionSize != HeapRegion::GrainBytes) {
-    FLAG_SET_ERGO(size_t, G1HeapRegionSize, HeapRegion::GrainBytes);
-  }
-
-  if (SurvivorRatio < 1) {
-    vm_exit_during_initialization("Invalid survivor ratio specified");
-  }
-  CollectorPolicy::initialize_flags();
-  _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
-}
-
-
-void G1CollectorPolicy::init() {
-  // Set aside an initial future to_space.
-  _g1 = G1CollectedHeap::heap();
-  _collection_set = _g1->collection_set();
-  _collection_set->set_policy(this);
-
-  assert(Heap_lock->owned_by_self(), "Locking discipline.");
-
-  initialize_gc_policy_counters();
-
-  if (adaptive_young_list_length()) {
-    _young_list_fixed_length = 0;
+void G1Measurements::report_cards_per_entry_ratio(double cards_per_entry_ratio, bool last_gc_was_young) {
+  if (last_gc_was_young) {
+    _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
   } else {
-    _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
-  }
-  _free_regions_at_end_of_collection = _g1->num_free_regions();
-
-  update_young_list_max_and_target_length();
-  // We may immediately start allocating regions and placing them on the
-  // collection set list. Initialize the per-collection set info
-  _collection_set->start_incremental_building();
-}
-
-void G1CollectorPolicy::note_gc_start(uint num_active_workers) {
-  phase_times()->note_gc_start(num_active_workers);
-}
-
-// Create the jstat counters for the policy.
-void G1CollectorPolicy::initialize_gc_policy_counters() {
-  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
-}
-
-bool G1CollectorPolicy::predict_will_fit(uint young_length,
-                                         double base_time_ms,
-                                         uint base_free_regions,
-                                         double target_pause_time_ms) const {
-  if (young_length >= base_free_regions) {
-    // end condition 1: not enough space for the young regions
-    return false;
-  }
-
-  double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
-  size_t bytes_to_copy =
-    (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
-  double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
-  double young_other_time_ms = predict_young_other_time_ms(young_length);
-  double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
-  if (pause_time_ms > target_pause_time_ms) {
-    // end condition 2: prediction is over the target pause time
-    return false;
-  }
-
-  size_t free_bytes = (base_free_regions - young_length) * HeapRegion::GrainBytes;
-
-  // When copying, we will likely need more bytes free than is live in the region.
-  // Add some safety margin to factor in the confidence of our guess, and the
-  // natural expected waste.
-  // (100.0 / G1ConfidencePercent) is a scale factor that expresses the uncertainty
-  // of the calculation: the lower the confidence, the more headroom.
-  // (100 + TargetPLABWastePct) represents the increase in expected bytes during
-  // copying due to anticipated waste in the PLABs.
-  double safety_factor = (100.0 / G1ConfidencePercent) * (100 + TargetPLABWastePct) / 100.0;
-  size_t expected_bytes_to_copy = (size_t)(safety_factor * bytes_to_copy);
-
-  if (expected_bytes_to_copy > free_bytes) {
-    // end condition 3: out-of-space
-    return false;
-  }
-
-  // success!
-  return true;
-}
-
-void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) {
-  // re-calculate the necessary reserve
-  double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
-  // We use ceiling so that if reserve_regions_d is > 0.0 (but
-  // smaller than 1.0) we'll get 1.
-  _reserve_regions = (uint) ceil(reserve_regions_d);
-
-  _young_gen_sizer->heap_size_changed(new_number_of_regions);
-
-  _ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes);
-}
-
-uint G1CollectorPolicy::calculate_young_list_desired_min_length(
-                                                     uint base_min_length) const {
-  uint desired_min_length = 0;
-  if (adaptive_young_list_length()) {
-    if (_alloc_rate_ms_seq->num() > 3) {
-      double now_sec = os::elapsedTime();
-      double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
-      double alloc_rate_ms = predict_alloc_rate_ms();
-      desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
-    } else {
-      // otherwise we don't have enough info to make the prediction
-    }
-  }
-  desired_min_length += base_min_length;
-  // make sure we don't go below any user-defined minimum bound
-  return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
-}
-
-uint G1CollectorPolicy::calculate_young_list_desired_max_length() const {
-  // Here, we might want to also take into account any additional
-  // constraints (i.e., user-defined minimum bound). Currently, we
-  // effectively don't set this bound.
-  return _young_gen_sizer->max_desired_young_length();
-}
-
-uint G1CollectorPolicy::update_young_list_max_and_target_length() {
-  return update_young_list_max_and_target_length(predict_rs_lengths());
-}
-
-uint G1CollectorPolicy::update_young_list_max_and_target_length(size_t rs_lengths) {
-  uint unbounded_target_length = update_young_list_target_length(rs_lengths);
-  update_max_gc_locker_expansion();
-  return unbounded_target_length;
-}
-
-uint G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
-  YoungTargetLengths young_lengths = young_list_target_lengths(rs_lengths);
-  _young_list_target_length = young_lengths.first;
-  return young_lengths.second;
-}
-
-G1CollectorPolicy::YoungTargetLengths G1CollectorPolicy::young_list_target_lengths(size_t rs_lengths) const {
-  YoungTargetLengths result;
-
-  // Calculate the absolute and desired min bounds first.
-
-  // This is how many young regions we already have (currently: the survivors).
-  const uint base_min_length = _g1->young_list()->survivor_length();
-  uint desired_min_length = calculate_young_list_desired_min_length(base_min_length);
-  // This is the absolute minimum young length. Ensure that we
-  // will at least have one eden region available for allocation.
-  uint absolute_min_length = base_min_length + MAX2(_g1->young_list()->eden_length(), (uint)1);
-  // If we shrank the young list target it should not shrink below the current size.
-  desired_min_length = MAX2(desired_min_length, absolute_min_length);
-  // Calculate the absolute and desired max bounds.
-
-  uint desired_max_length = calculate_young_list_desired_max_length();
-
-  uint young_list_target_length = 0;
-  if (adaptive_young_list_length()) {
-    if (collector_state()->gcs_are_young()) {
-      young_list_target_length =
-                        calculate_young_list_target_length(rs_lengths,
-                                                           base_min_length,
-                                                           desired_min_length,
-                                                           desired_max_length);
-    } else {
-      // Don't calculate anything and let the code below bound it to
-      // the desired_min_length, i.e., do the next GC as soon as
-      // possible to maximize how many old regions we can add to it.
-    }
-  } else {
-    // The user asked for a fixed young gen so we'll fix the young gen
-    // whether the next GC is young or mixed.
-    young_list_target_length = _young_list_fixed_length;
-  }
-
-  result.second = young_list_target_length;
-
-  // We will try our best not to "eat" into the reserve.
-  uint absolute_max_length = 0;
-  if (_free_regions_at_end_of_collection > _reserve_regions) {
-    absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
-  }
-  if (desired_max_length > absolute_max_length) {
-    desired_max_length = absolute_max_length;
-  }
-
-  // Make sure we don't go over the desired max length, nor under the
-  // desired min length. In case they clash, desired_min_length wins
-  // which is why that test is second.
-  if (young_list_target_length > desired_max_length) {
-    young_list_target_length = desired_max_length;
-  }
-  if (young_list_target_length < desired_min_length) {
-    young_list_target_length = desired_min_length;
-  }
-
-  assert(young_list_target_length > base_min_length,
-         "we should be able to allocate at least one eden region");
-  assert(young_list_target_length >= absolute_min_length, "post-condition");
-
-  result.first = young_list_target_length;
-  return result;
-}
-
-uint
-G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
-                                                      uint base_min_length,
-                                                      uint desired_min_length,
-                                                      uint desired_max_length) const {
-  assert(adaptive_young_list_length(), "pre-condition");
-  assert(collector_state()->gcs_are_young(), "only call this for young GCs");
-
-  // In case some edge-condition makes the desired max length too small...
-  if (desired_max_length <= desired_min_length) {
-    return desired_min_length;
-  }
-
-  // We'll adjust min_young_length and max_young_length not to include
-  // the already allocated young regions (i.e., so they reflect the
-  // min and max eden regions we'll allocate). The base_min_length
-  // will be reflected in the predictions by the
-  // survivor_regions_evac_time prediction.
-  assert(desired_min_length > base_min_length, "invariant");
-  uint min_young_length = desired_min_length - base_min_length;
-  assert(desired_max_length > base_min_length, "invariant");
-  uint max_young_length = desired_max_length - base_min_length;
-
-  double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
-  double survivor_regions_evac_time = predict_survivor_regions_evac_time();
-  size_t pending_cards = get_new_size_prediction(_pending_cards_seq);
-  size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
-  size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
-  double base_time_ms =
-    predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
-    survivor_regions_evac_time;
-  uint available_free_regions = _free_regions_at_end_of_collection;
-  uint base_free_regions = 0;
-  if (available_free_regions > _reserve_regions) {
-    base_free_regions = available_free_regions - _reserve_regions;
-  }
-
-  // Here, we will make sure that the shortest young length that
-  // makes sense fits within the target pause time.
-
-  if (predict_will_fit(min_young_length, base_time_ms,
-                       base_free_regions, target_pause_time_ms)) {
-    // The shortest young length will fit into the target pause time;
-    // we'll now check whether the absolute maximum number of young
-    // regions will fit in the target pause time. If not, we'll do
-    // a binary search between min_young_length and max_young_length.
-    if (predict_will_fit(max_young_length, base_time_ms,
-                         base_free_regions, target_pause_time_ms)) {
-      // The maximum young length will fit into the target pause time.
-      // We are done so set min young length to the maximum length (as
-      // the result is assumed to be returned in min_young_length).
-      min_young_length = max_young_length;
-    } else {
-      // The maximum possible number of young regions will not fit within
-      // the target pause time so we'll search for the optimal
-      // length. The loop invariants are:
-      //
-      // min_young_length < max_young_length
-      // min_young_length is known to fit into the target pause time
-      // max_young_length is known not to fit into the target pause time
-      //
-      // Going into the loop we know the above hold as we've just
-      // checked them. Every time around the loop we check whether
-      // the middle value between min_young_length and
-      // max_young_length fits into the target pause time. If it
-      // does, it becomes the new min. If it doesn't, it becomes
-      // the new max. This way we maintain the loop invariants.
-
-      assert(min_young_length < max_young_length, "invariant");
-      uint diff = (max_young_length - min_young_length) / 2;
-      while (diff > 0) {
-        uint young_length = min_young_length + diff;
-        if (predict_will_fit(young_length, base_time_ms,
-                             base_free_regions, target_pause_time_ms)) {
-          min_young_length = young_length;
-        } else {
-          max_young_length = young_length;
-        }
-        assert(min_young_length < max_young_length, "invariant");
-        diff = (max_young_length - min_young_length) / 2;
-      }
-      // The results is min_young_length which, according to the
-      // loop invariants, should fit within the target pause time.
- - // These are the post-conditions of the binary search above: - assert(min_young_length < max_young_length, - "otherwise we should have discovered that max_young_length " - "fits into the pause target and not done the binary search"); - assert(predict_will_fit(min_young_length, base_time_ms, - base_free_regions, target_pause_time_ms), - "min_young_length, the result of the binary search, should " - "fit into the pause target"); - assert(!predict_will_fit(min_young_length + 1, base_time_ms, - base_free_regions, target_pause_time_ms), - "min_young_length, the result of the binary search, should be " - "optimal, so no larger length should fit into the pause target"); - } - } else { - // Even the minimum length doesn't fit into the pause time - // target, return it as the result nevertheless. - } - return base_min_length + min_young_length; -} - -double G1CollectorPolicy::predict_survivor_regions_evac_time() const { - double survivor_regions_evac_time = 0.0; - for (HeapRegion * r = _g1->young_list()->first_survivor_region(); - r != NULL && r != _g1->young_list()->last_survivor_region()->get_next_young_region(); - r = r->get_next_young_region()) { - survivor_regions_evac_time += predict_region_elapsed_time_ms(r, collector_state()->gcs_are_young()); - } - return survivor_regions_evac_time; -} - -void G1CollectorPolicy::revise_young_list_target_length_if_necessary(size_t rs_lengths) { - guarantee( adaptive_young_list_length(), "should not call this otherwise" ); - - if (rs_lengths > _rs_lengths_prediction) { - // add 10% to avoid having to recalculate often - size_t rs_lengths_prediction = rs_lengths * 1100 / 1000; - update_rs_lengths_prediction(rs_lengths_prediction); - - update_young_list_max_and_target_length(rs_lengths_prediction); + _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio); } } -void G1CollectorPolicy::update_rs_lengths_prediction() { - update_rs_lengths_prediction(predict_rs_lengths()); +void G1Measurements::report_rs_length_diff(double rs_length_diff) { + _rs_length_diff_seq->add(rs_length_diff); } -void G1CollectorPolicy::update_rs_lengths_prediction(size_t prediction) { - if (collector_state()->gcs_are_young() && adaptive_young_list_length()) { - _rs_lengths_prediction = prediction; +void G1Measurements::report_cost_per_byte_ms(double cost_per_byte_ms, bool in_marking_window) { + if (in_marking_window) { + _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms); + } else { + _cost_per_byte_ms_seq->add(cost_per_byte_ms); } } -#ifndef PRODUCT -bool G1CollectorPolicy::verify_young_ages() { - HeapRegion* head = _g1->young_list()->first_region(); - return - verify_young_ages(head, _short_lived_surv_rate_group); - // also call verify_young_ages on any additional surv rate groups +void G1Measurements::report_young_other_cost_per_region_ms(double other_cost_per_region_ms) { + _young_other_cost_per_region_ms_seq->add(other_cost_per_region_ms); } -bool -G1CollectorPolicy::verify_young_ages(HeapRegion* head, - SurvRateGroup *surv_rate_group) { - guarantee( surv_rate_group != NULL, "pre-condition" ); - - const char* name = surv_rate_group->name(); - bool ret = true; - int prev_age = -1; - - for (HeapRegion* curr = head; - curr != NULL; - curr = curr->get_next_young_region()) { - SurvRateGroup* group = curr->surv_rate_group(); - if (group == NULL && !curr->is_survivor()) { - log_error(gc, verify)("## %s: encountered NULL surv_rate_group", name); - ret = false; - } - - if (surv_rate_group == group) { - int age = curr->age_in_surv_rate_group(); - - if (age < 0) { - log_error(gc, 
verify)("## %s: encountered negative age", name); - ret = false; - } - - if (age <= prev_age) { - log_error(gc, verify)("## %s: region ages are not strictly increasing (%d, %d)", name, age, prev_age); - ret = false; - } - prev_age = age; - } - } - - return ret; -} -#endif // PRODUCT - -void G1CollectorPolicy::record_full_collection_start() { - _full_collection_start_sec = os::elapsedTime(); - // Release the future to-space so that it is available for compaction into. - collector_state()->set_full_collection(true); +void G1Measurements::report_non_young_other_cost_per_region_ms(double other_cost_per_region_ms) { + _non_young_other_cost_per_region_ms_seq->add(other_cost_per_region_ms); } -void G1CollectorPolicy::record_full_collection_end() { - // Consider this like a collection pause for the purposes of allocation - // since last pause. - double end_sec = os::elapsedTime(); - double full_gc_time_sec = end_sec - _full_collection_start_sec; - double full_gc_time_ms = full_gc_time_sec * 1000.0; - - update_recent_gc_times(end_sec, full_gc_time_ms); - - collector_state()->set_full_collection(false); - - // "Nuke" the heuristics that control the young/mixed GC - // transitions and make sure we start with young GCs after the Full GC. - collector_state()->set_gcs_are_young(true); - collector_state()->set_last_young_gc(false); - collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0)); - collector_state()->set_during_initial_mark_pause(false); - collector_state()->set_in_marking_window(false); - collector_state()->set_in_marking_window_im(false); - - _short_lived_surv_rate_group->start_adding_regions(); - // also call this on any additional surv rate groups - - _free_regions_at_end_of_collection = _g1->num_free_regions(); - // Reset survivors SurvRateGroup. - _survivor_surv_rate_group->reset(); - update_young_list_max_and_target_length(); - update_rs_lengths_prediction(); - cset_chooser()->clear(); - - _bytes_allocated_in_old_since_last_gc = 0; - - record_pause(FullGC, _full_collection_start_sec, end_sec); +void G1Measurements::report_constant_other_time_ms(double constant_other_time_ms) { + _constant_other_time_ms_seq->add(constant_other_time_ms); } -void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) { - // We only need to do this here as the policy will only be applied - // to the GC we're about to start. so, no point is calculating this - // every time we calculate / recalculate the target young length. 
- update_survivors_policy(); - - assert(_g1->used() == _g1->recalculate_used(), - "sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT, - _g1->used(), _g1->recalculate_used()); - - phase_times()->record_cur_collection_start_sec(start_time_sec); - _pending_cards = _g1->pending_card_num(); - - _collection_set->reset_bytes_used_before(); - _bytes_copied_during_gc = 0; - - collector_state()->set_last_gc_was_young(false); - - // do that for any other surv rate groups - _short_lived_surv_rate_group->stop_adding_regions(); - _survivors_age_table.clear(); - - assert( verify_young_ages(), "region age verification" ); +void G1Measurements::report_pending_cards(double pending_cards) { + _pending_cards_seq->add(pending_cards); } -void G1CollectorPolicy::record_concurrent_mark_init_end(double - mark_init_elapsed_time_ms) { - collector_state()->set_during_marking(true); - assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now"); - collector_state()->set_during_initial_mark_pause(false); +void G1Measurements::report_rs_lengths(double rs_lengths) { + _rs_lengths_seq->add(rs_lengths); } -void G1CollectorPolicy::record_concurrent_mark_remark_start() { - _mark_remark_start_sec = os::elapsedTime(); - collector_state()->set_during_marking(false); -} - -void G1CollectorPolicy::record_concurrent_mark_remark_end() { - double end_time_sec = os::elapsedTime(); - double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0; - _concurrent_mark_remark_times_ms->add(elapsed_time_ms); - _prev_collection_pause_end_ms += elapsed_time_ms; - - record_pause(Remark, _mark_remark_start_sec, end_time_sec); -} - -void G1CollectorPolicy::record_concurrent_mark_cleanup_start() { - _mark_cleanup_start_sec = os::elapsedTime(); -} - -void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() { - bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc", - "skip last young-only gc"); - collector_state()->set_last_young_gc(should_continue_with_reclaim); - // We skip the marking phase. 
- if (!should_continue_with_reclaim) { - abort_time_to_mixed_tracking(); - } - collector_state()->set_in_marking_window(false); -} - -double G1CollectorPolicy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const { - return phase_times()->average_time_ms(phase); -} - -double G1CollectorPolicy::young_other_time_ms() const { - return phase_times()->young_cset_choice_time_ms() + - phase_times()->young_free_cset_time_ms(); -} - -double G1CollectorPolicy::non_young_other_time_ms() const { - return phase_times()->non_young_cset_choice_time_ms() + - phase_times()->non_young_free_cset_time_ms(); - -} - -double G1CollectorPolicy::other_time_ms(double pause_time_ms) const { - return pause_time_ms - - average_time_ms(G1GCPhaseTimes::UpdateRS) - - average_time_ms(G1GCPhaseTimes::ScanRS) - - average_time_ms(G1GCPhaseTimes::ObjCopy) - - average_time_ms(G1GCPhaseTimes::Termination); -} - -double G1CollectorPolicy::constant_other_time_ms(double pause_time_ms) const { - return other_time_ms(pause_time_ms) - young_other_time_ms() - non_young_other_time_ms(); -} - -CollectionSetChooser* G1CollectorPolicy::cset_chooser() const { - return _collection_set->cset_chooser(); -} - -bool G1CollectorPolicy::about_to_start_mixed_phase() const { - return _g1->concurrent_mark()->cmThread()->during_cycle() || collector_state()->last_young_gc(); -} - -bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) { - if (about_to_start_mixed_phase()) { - return false; - } - - size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold(); - - size_t cur_used_bytes = _g1->non_young_capacity_bytes(); - size_t alloc_byte_size = alloc_word_size * HeapWordSize; - size_t marking_request_bytes = cur_used_bytes + alloc_byte_size; - - bool result = false; - if (marking_request_bytes > marking_initiating_used_threshold) { - result = collector_state()->gcs_are_young() && !collector_state()->last_young_gc(); - log_debug(gc, ergo, ihop)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s", - result ? "Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)", - cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1->capacity() * 100, source); - } - - return result; -} - -// Anything below that is considered to be zero -#define MIN_TIMER_GRANULARITY 0.0000001 - -void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) { - double end_time_sec = os::elapsedTime(); - - size_t cur_used_bytes = _g1->used(); - assert(cur_used_bytes == _g1->recalculate_used(), "It should!"); - bool last_pause_included_initial_mark = false; - bool update_stats = !_g1->evacuation_failed(); - - NOT_PRODUCT(_short_lived_surv_rate_group->print()); - - record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec); - - last_pause_included_initial_mark = collector_state()->during_initial_mark_pause(); - if (last_pause_included_initial_mark) { - record_concurrent_mark_init_end(0.0); - } else { - maybe_start_marking(); - } - - double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms); - if (app_time_ms < MIN_TIMER_GRANULARITY) { - // This usually happens due to the timer not having the required - // granularity. Some Linuxes are the usual culprits. 
-    // We'll just set it to something (arbitrarily) small. -    app_time_ms = 1.0; -  } - -  if (update_stats) { -    // We maintain the invariant that all objects allocated by mutator -    // threads will be allocated out of eden regions. So, we can use -    // the eden region number allocated since the previous GC to -    // calculate the application's allocate rate. The only exception -    // to that is humongous objects that are allocated separately. But -    // given that humongous object allocations do not really affect -    // either the pause's duration or when the next pause will take -    // place we can safely ignore them here. -    uint regions_allocated = _collection_set->eden_region_length(); -    double alloc_rate_ms = (double) regions_allocated / app_time_ms; -    _alloc_rate_ms_seq->add(alloc_rate_ms); - -    double interval_ms = -      (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0; -    update_recent_gc_times(end_time_sec, pause_time_ms); -    _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms; -    if (recent_avg_pause_time_ratio() < 0.0 || -        (recent_avg_pause_time_ratio() - 1.0 > 0.0)) { -      // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in -      // CR 6902692 by redoing the manner in which the ratio is incrementally computed. -      if (_recent_avg_pause_time_ratio < 0.0) { -        _recent_avg_pause_time_ratio = 0.0; -      } else { -        assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant"); -        _recent_avg_pause_time_ratio = 1.0; -      } -    } - -    // Compute the ratio of just this last pause time to the entire time range stored -    // in the vectors. Comparing this pause to the entire range, rather than only the -    // most recent interval, has the effect of smoothing over a possible transient 'burst' -    // of more frequent pauses that don't really reflect a change in heap occupancy. -    // This reduces the likelihood of a needless heap expansion being triggered. -    _last_pause_time_ratio = -      (pause_time_ms * _recent_prev_end_times_for_all_gcs_sec->num()) / interval_ms; -  } - -  bool new_in_marking_window = collector_state()->in_marking_window(); -  bool new_in_marking_window_im = false; -  if (last_pause_included_initial_mark) { -    new_in_marking_window = true; -    new_in_marking_window_im = true; -  } - -  if (collector_state()->last_young_gc()) { -    // This is supposed to be the "last young GC" before we start -    // doing mixed GCs. Here we decide whether to start mixed GCs or not. -    assert(!last_pause_included_initial_mark, "The last young GC is not allowed to be an initial mark GC"); - -    if (next_gc_should_be_mixed("start mixed GCs", -                                "do not start mixed GCs")) { -      collector_state()->set_gcs_are_young(false); -    } else { -      // We aborted the mixed GC phase early. -      abort_time_to_mixed_tracking(); -    } - -    collector_state()->set_last_young_gc(false); -  } - -  if (!collector_state()->last_gc_was_young()) { -    // This is a mixed GC. Here we decide whether to continue doing -    // mixed GCs or not. -    if (!next_gc_should_be_mixed("continue mixed GCs", -                                 "do not continue mixed GCs")) { -      collector_state()->set_gcs_are_young(true); - -      maybe_start_marking(); -    } -  } - -  _short_lived_surv_rate_group->start_adding_regions(); -  // Do that for any other surv rate groups - -  double scan_hcc_time_ms = ConcurrentG1Refine::hot_card_cache_enabled() ?
average_time_ms(G1GCPhaseTimes::ScanHCC) : 0.0; - -  if (update_stats) { -    double cost_per_card_ms = 0.0; -    if (_pending_cards > 0) { -      cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms) / (double) _pending_cards; -      _cost_per_card_ms_seq->add(cost_per_card_ms); -    } -    _cost_scan_hcc_seq->add(scan_hcc_time_ms); - -    double cost_per_entry_ms = 0.0; -    if (cards_scanned > 10) { -      cost_per_entry_ms = average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned; -      if (collector_state()->last_gc_was_young()) { -        _cost_per_entry_ms_seq->add(cost_per_entry_ms); -      } else { -        _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms); -      } -    } - -    if (_max_rs_lengths > 0) { -      double cards_per_entry_ratio = -        (double) cards_scanned / (double) _max_rs_lengths; -      if (collector_state()->last_gc_was_young()) { -        _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio); -      } else { -        _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio); -      } -    } - -    // This is defensive. For a while _max_rs_lengths could get -    // smaller than _recorded_rs_lengths which was causing -    // rs_length_diff to get very large and mess up the RSet length -    // predictions. The reason was unsafe concurrent updates to the -    // _inc_cset_recorded_rs_lengths field which the code below guards -    // against (see CR 7118202). This bug has now been fixed (see CR -    // 7119027). However, I'm still worried that -    // _inc_cset_recorded_rs_lengths might still end up somewhat -    // inaccurate. The concurrent refinement thread calculates an -    // RSet's length concurrently with other CR threads updating it -    // which might cause it to calculate the length incorrectly (if, -    // say, it's in mid-coarsening). So I'll leave in the defensive -    // conditional below just in case. -    size_t rs_length_diff = 0; -    size_t recorded_rs_lengths = _collection_set->recorded_rs_lengths(); -    if (_max_rs_lengths > recorded_rs_lengths) { -      rs_length_diff = _max_rs_lengths - recorded_rs_lengths; -    } -    _rs_length_diff_seq->add((double) rs_length_diff); - -    size_t freed_bytes = heap_used_bytes_before_gc - cur_used_bytes; -    size_t copied_bytes = _collection_set->bytes_used_before() - freed_bytes; -    double cost_per_byte_ms = 0.0; - -    if (copied_bytes > 0) { -      cost_per_byte_ms = average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes; -      if (collector_state()->in_marking_window()) { -        _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms); -      } else { -        _cost_per_byte_ms_seq->add(cost_per_byte_ms); -      } -    } - -    if (_collection_set->young_region_length() > 0) { -      _young_other_cost_per_region_ms_seq->add(young_other_time_ms() / -                                               _collection_set->young_region_length()); -    } - -    if (_collection_set->old_region_length() > 0) { -      _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms() / -                                                   _collection_set->old_region_length()); -    } - -    _constant_other_time_ms_seq->add(constant_other_time_ms(pause_time_ms)); - -    _pending_cards_seq->add((double) _pending_cards); -    _rs_lengths_seq->add((double) _max_rs_lengths); -  } - -  collector_state()->set_in_marking_window(new_in_marking_window); -  collector_state()->set_in_marking_window_im(new_in_marking_window_im); -  _free_regions_at_end_of_collection = _g1->num_free_regions(); -  // IHOP control wants to know the expected young gen length if it were not -  // restrained by the heap reserve. Using the actual length would make the -  // prediction too small and would limit the young gen every time we get to the -  // predicted target occupancy.
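Every sequence fed in this block is a per-unit cost: a measured phase time divided by the number of units it processed, so a later pause can be predicted as predicted units times predicted unit cost. A schematic of that normalization with hypothetical names (not code from this patch; the real version also subtracts the Scan HCC time from Update RS):

#include <cstddef>

// Per-unit costs extracted from one pause, mirroring the guards above.
struct PauseSample {
  double update_rs_ms;  // Update RS phase time
  double scan_rs_ms;    // Scan RS phase time
  double obj_copy_ms;   // Object Copy phase time
  size_t pending_cards;
  size_t cards_scanned;
  size_t bytes_copied;
};

struct UnitCosts {
  double per_card_ms;
  double per_entry_ms;
  double per_byte_ms;
};

static UnitCosts unit_costs(const PauseSample& s) {
  UnitCosts c = {0.0, 0.0, 0.0};
  if (s.pending_cards > 0)  c.per_card_ms  = s.update_rs_ms / (double) s.pending_cards;
  if (s.cards_scanned > 10) c.per_entry_ms = s.scan_rs_ms   / (double) s.cards_scanned;
  if (s.bytes_copied > 0)   c.per_byte_ms  = s.obj_copy_ms  / (double) s.bytes_copied;
  return c;
}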
- size_t last_unrestrained_young_length = update_young_list_max_and_target_length(); - update_rs_lengths_prediction(); - - update_ihop_prediction(app_time_ms / 1000.0, - _bytes_allocated_in_old_since_last_gc, - last_unrestrained_young_length * HeapRegion::GrainBytes); - _bytes_allocated_in_old_since_last_gc = 0; - - _ihop_control->send_trace_event(_g1->gc_tracer_stw()); - - // Note that _mmu_tracker->max_gc_time() returns the time in seconds. - double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0; - - if (update_rs_time_goal_ms < scan_hcc_time_ms) { - log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal)." - "Update RS time goal: %1.2fms Scan HCC time: %1.2fms", - update_rs_time_goal_ms, scan_hcc_time_ms); - - update_rs_time_goal_ms = 0; - } else { - update_rs_time_goal_ms -= scan_hcc_time_ms; - } - adjust_concurrent_refinement(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms, - phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS), - update_rs_time_goal_ms); - - cset_chooser()->verify(); -} - -G1IHOPControl* G1CollectorPolicy::create_ihop_control() const { - if (G1UseAdaptiveIHOP) { - return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent, - &_predictor, - G1ReservePercent, - G1HeapWastePercent); - } else { - return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent); - } -} - -void G1CollectorPolicy::update_ihop_prediction(double mutator_time_s, - size_t mutator_alloc_bytes, - size_t young_gen_size) { - // Always try to update IHOP prediction. Even evacuation failures give information - // about e.g. whether to start IHOP earlier next time. - - // Avoid using really small application times that might create samples with - // very high or very low values. They may be caused by e.g. back-to-back gcs. - double const min_valid_time = 1e-6; - - bool report = false; - - double marking_to_mixed_time = -1.0; - if (!collector_state()->last_gc_was_young() && _initial_mark_to_mixed.has_result()) { - marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time(); - assert(marking_to_mixed_time > 0.0, - "Initial mark to mixed time must be larger than zero but is %.3f", - marking_to_mixed_time); - if (marking_to_mixed_time > min_valid_time) { - _ihop_control->update_marking_length(marking_to_mixed_time); - report = true; - } - } - - // As an approximation for the young gc promotion rates during marking we use - // all of them. In many applications there are only a few if any young gcs during - // marking, which makes any prediction useless. This increases the accuracy of the - // prediction. 
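For reference, need_to_start_conc_mark() earlier in this file reduces to an occupancy test against the IHOP threshold that this prediction machinery feeds. A condensed sketch of the static-threshold variant (hypothetical names; G1AdaptiveIHOPControl derives the threshold from the predictions instead of a fixed percent):

#include <cstddef>

// Request a concurrent cycle once old-generation occupancy plus the pending
// allocation crosses a fixed percentage of the committed heap.
static bool occupancy_exceeds_ihop(size_t non_young_used_bytes,
                                   size_t alloc_request_bytes,
                                   size_t capacity_bytes,
                                   unsigned ihop_percent) {
  size_t threshold = capacity_bytes / 100 * ihop_percent;
  return non_young_used_bytes + alloc_request_bytes > threshold;
}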
-  if (collector_state()->last_gc_was_young() && mutator_time_s > min_valid_time) { -    _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size); -    report = true; -  } - -  if (report) { -    report_ihop_statistics(); -  } -} - -void G1CollectorPolicy::report_ihop_statistics() { -  _ihop_control->print(); -} - -void G1CollectorPolicy::print_phases() { -  phase_times()->print(); -} - -void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time, -                                                     double update_rs_processed_buffers, -                                                     double goal_ms) { -  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); -  ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine(); - -  if (G1UseAdaptiveConcRefinement) { -    const int k_gy = 3, k_gr = 6; -    const double inc_k = 1.1, dec_k = 0.9; - -    size_t g = cg1r->green_zone(); -    if (update_rs_time > goal_ms) { -      g = (size_t)(g * dec_k);  // Can become 0, that's OK. That would mean a mutator-only processing. -    } else { -      if (update_rs_time < goal_ms && update_rs_processed_buffers > g) { -        g = (size_t)MAX2(g * inc_k, g + 1.0); -      } -    } -    // Change the refinement threads params -    cg1r->set_green_zone(g); -    cg1r->set_yellow_zone(g * k_gy); -    cg1r->set_red_zone(g * k_gr); -    cg1r->reinitialize_threads(); - -    size_t processing_threshold_delta = MAX2<size_t>(cg1r->green_zone() * _predictor.sigma(), 1); -    size_t processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta, -                                       cg1r->yellow_zone()); -    // Change the barrier params -    dcqs.set_process_completed_threshold((int)processing_threshold); -    dcqs.set_max_completed_queue((int)cg1r->red_zone()); -  } - -  size_t curr_queue_size = dcqs.completed_buffers_num(); -  if (curr_queue_size >= cg1r->yellow_zone()) { -    dcqs.set_completed_queue_padding(curr_queue_size); -  } else { -    dcqs.set_completed_queue_padding(0); -  } -  dcqs.notify_if_necessary(); -} - -size_t G1CollectorPolicy::predict_rs_lengths() const { -  return get_new_size_prediction(_rs_lengths_seq); -} - -size_t G1CollectorPolicy::predict_rs_length_diff() const { +size_t G1Measurements::predict_rs_length_diff() const { return get_new_size_prediction(_rs_length_diff_seq); } -double G1CollectorPolicy::predict_alloc_rate_ms() const { +double G1Measurements::predict_alloc_rate_ms() const { return get_new_prediction(_alloc_rate_ms_seq); } -double G1CollectorPolicy::predict_cost_per_card_ms() const { +double G1Measurements::predict_cost_per_card_ms() const { return get_new_prediction(_cost_per_card_ms_seq); } -double G1CollectorPolicy::predict_scan_hcc_ms() const { +double G1Measurements::predict_scan_hcc_ms() const { return get_new_prediction(_cost_scan_hcc_seq); } -double G1CollectorPolicy::predict_rs_update_time_ms(size_t pending_cards) const { +double G1Measurements::predict_rs_update_time_ms(size_t pending_cards) const { return pending_cards * predict_cost_per_card_ms() + predict_scan_hcc_ms(); } -double G1CollectorPolicy::predict_young_cards_per_entry_ratio() const { +double G1Measurements::predict_young_cards_per_entry_ratio() const { return get_new_prediction(_young_cards_per_entry_ratio_seq); } -double G1CollectorPolicy::predict_mixed_cards_per_entry_ratio() const { +double G1Measurements::predict_mixed_cards_per_entry_ratio() const { if (_mixed_cards_per_entry_ratio_seq->num() < 2) { return predict_young_cards_per_entry_ratio(); } else { @@ -1181,23 +247,23 @@ } } -size_t G1CollectorPolicy::predict_young_card_num(size_t rs_length) const { -  return (size_t) (rs_length * predict_young_cards_per_entry_ratio()); +size_t
G1Measurements::predict_card_num(size_t rs_length, bool gcs_are_young) const { + if (gcs_are_young) { + return (size_t) (rs_length * predict_young_cards_per_entry_ratio()); + } else { + return (size_t) (rs_length * predict_mixed_cards_per_entry_ratio()); + } } -size_t G1CollectorPolicy::predict_non_young_card_num(size_t rs_length) const { - return (size_t)(rs_length * predict_mixed_cards_per_entry_ratio()); -} - -double G1CollectorPolicy::predict_rs_scan_time_ms(size_t card_num) const { - if (collector_state()->gcs_are_young()) { +double G1Measurements::predict_rs_scan_time_ms(size_t card_num, bool gcs_are_young) const { + if (gcs_are_young) { return card_num * get_new_prediction(_cost_per_entry_ms_seq); } else { return predict_mixed_rs_scan_time_ms(card_num); } } -double G1CollectorPolicy::predict_mixed_rs_scan_time_ms(size_t card_num) const { +double G1Measurements::predict_mixed_rs_scan_time_ms(size_t card_num) const { if (_mixed_cost_per_entry_ms_seq->num() < 3) { return card_num * get_new_prediction(_cost_per_entry_ms_seq); } else { @@ -1205,7 +271,7 @@ } } -double G1CollectorPolicy::predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const { +double G1Measurements::predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const { if (_cost_per_byte_ms_during_cm_seq->num() < 3) { return (1.1 * bytes_to_copy) * get_new_prediction(_cost_per_byte_ms_seq); } else { @@ -1213,549 +279,54 @@ } } -double G1CollectorPolicy::predict_object_copy_time_ms(size_t bytes_to_copy) const { - if (collector_state()->during_concurrent_mark()) { +double G1Measurements::predict_object_copy_time_ms(size_t bytes_to_copy, bool during_concurrent_mark) const { + if (during_concurrent_mark) { return predict_object_copy_time_ms_during_cm(bytes_to_copy); } else { return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_seq); } } -double G1CollectorPolicy::predict_constant_other_time_ms() const { +double G1Measurements::predict_constant_other_time_ms() const { return get_new_prediction(_constant_other_time_ms_seq); } -double G1CollectorPolicy::predict_young_other_time_ms(size_t young_num) const { +double G1Measurements::predict_young_other_time_ms(size_t young_num) const { return young_num * get_new_prediction(_young_other_cost_per_region_ms_seq); } -double G1CollectorPolicy::predict_non_young_other_time_ms(size_t non_young_num) const { +double G1Measurements::predict_non_young_other_time_ms(size_t non_young_num) const { return non_young_num * get_new_prediction(_non_young_other_cost_per_region_ms_seq); } -double G1CollectorPolicy::predict_remark_time_ms() const { +double G1Measurements::predict_remark_time_ms() const { return get_new_prediction(_concurrent_mark_remark_times_ms); } -double G1CollectorPolicy::predict_cleanup_time_ms() const { +double G1Measurements::predict_cleanup_time_ms() const { return get_new_prediction(_concurrent_mark_cleanup_times_ms); } -double G1CollectorPolicy::predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const { - TruncatedSeq* seq = surv_rate_group->get_seq(age); - guarantee(seq->num() > 0, "There should be some young gen survivor samples available. 
Tried to access with age %d", age); - double pred = get_new_prediction(seq); - if (pred > 1.0) { - pred = 1.0; - } - return pred; +size_t G1Measurements::predict_rs_lengths() const { + return get_new_size_prediction(_rs_lengths_seq); } -double G1CollectorPolicy::predict_yg_surv_rate(int age) const { - return predict_yg_surv_rate(age, _short_lived_surv_rate_group); +size_t G1Measurements::predict_pending_cards() const { + return get_new_size_prediction(_pending_cards_seq); } -double G1CollectorPolicy::accum_yg_surv_rate_pred(int age) const { - return _short_lived_surv_rate_group->accum_surv_rate_pred(age); +void G1Measurements::update_recent_gc_times(double end_time_sec, + double pause_time_ms) { + double interval_ms = + (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0; + _recent_gc_times_ms->add(pause_time_ms); + _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec); + _prev_collection_pause_end_ms = end_time_sec * 1000.0; + + compute_pause_time_ratio(interval_ms, pause_time_ms); } -double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards, - size_t scanned_cards) const { - return - predict_rs_update_time_ms(pending_cards) + - predict_rs_scan_time_ms(scanned_cards) + - predict_constant_other_time_ms(); +void G1Measurements::report_concurrent_mark_cleanup_times_ms(double ms) { + _concurrent_mark_cleanup_times_ms->add(ms); } -double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) const { - size_t rs_length = predict_rs_lengths() + predict_rs_length_diff(); - size_t card_num; - if (collector_state()->gcs_are_young()) { - card_num = predict_young_card_num(rs_length); - } else { - card_num = predict_non_young_card_num(rs_length); - } - return predict_base_elapsed_time_ms(pending_cards, card_num); -} - -size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) const { - size_t bytes_to_copy; - if (hr->is_marked()) - bytes_to_copy = hr->max_live_bytes(); - else { - assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant"); - int age = hr->age_in_surv_rate_group(); - double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group()); - bytes_to_copy = (size_t) (hr->used() * yg_surv_rate); - } - return bytes_to_copy; -} - -double G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr, - bool for_young_gc) const { - size_t rs_length = hr->rem_set()->occupied(); - size_t card_num; - - // Predicting the number of cards is based on which type of GC - // we're predicting for. - if (for_young_gc) { - card_num = predict_young_card_num(rs_length); - } else { - card_num = predict_non_young_card_num(rs_length); - } - size_t bytes_to_copy = predict_bytes_to_copy(hr); - - double region_elapsed_time_ms = - predict_rs_scan_time_ms(card_num) + - predict_object_copy_time_ms(bytes_to_copy); - - // The prediction of the "other" time for this region is based - // upon the region type and NOT the GC type. 
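In formula form, the per-region prediction assembled here is the sum of three independently tracked terms: scan time for the region's remembered-set cards, copy time for the bytes expected to survive, and a fixed per-region overhead chosen by region type. A sketch (illustration only, not patch code):

// time(r) = scan_cost(cards(r)) + copy_cost(bytes_to_copy(r)) + other(type(r))
static double predicted_region_time_ms(size_t cards, double ms_per_card,
                                       size_t bytes, double ms_per_byte,
                                       double other_ms) {
  return (double) cards * ms_per_card + (double) bytes * ms_per_byte + other_ms;
}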
- if (hr->is_young()) { - region_elapsed_time_ms += predict_young_other_time_ms(1); - } else { - region_elapsed_time_ms += predict_non_young_other_time_ms(1); - } - return region_elapsed_time_ms; -} - -void G1CollectorPolicy::update_recent_gc_times(double end_time_sec, - double elapsed_ms) { - _recent_gc_times_ms->add(elapsed_ms); - _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec); - _prev_collection_pause_end_ms = end_time_sec * 1000.0; -} - -void G1CollectorPolicy::clear_ratio_check_data() { - _ratio_over_threshold_count = 0; - _ratio_over_threshold_sum = 0.0; - _pauses_since_start = 0; -} - -size_t G1CollectorPolicy::expansion_amount() { - double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0; - double last_gc_overhead = _last_pause_time_ratio * 100.0; - double threshold = _gc_overhead_perc; - size_t expand_bytes = 0; - - // If the heap is at less than half its maximum size, scale the threshold down, - // to a limit of 1. Thus the smaller the heap is, the more likely it is to expand, - // though the scaling code will likely keep the increase small. - if (_g1->capacity() <= _g1->max_capacity() / 2) { - threshold *= (double)_g1->capacity() / (double)(_g1->max_capacity() / 2); - threshold = MAX2(threshold, 1.0); - } - - // If the last GC time ratio is over the threshold, increment the count of - // times it has been exceeded, and add this ratio to the sum of exceeded - // ratios. - if (last_gc_overhead > threshold) { - _ratio_over_threshold_count++; - _ratio_over_threshold_sum += last_gc_overhead; - } - - // Check if we've had enough GC time ratio checks that were over the - // threshold to trigger an expansion. We'll also expand if we've - // reached the end of the history buffer and the average of all entries - // is still over the threshold. This indicates a smaller number of GCs were - // long enough to make the average exceed the threshold. - bool filled_history_buffer = _pauses_since_start == NumPrevPausesForHeuristics; - if ((_ratio_over_threshold_count == MinOverThresholdForGrowth) || - (filled_history_buffer && (recent_gc_overhead > threshold))) { - size_t min_expand_bytes = HeapRegion::GrainBytes; - size_t reserved_bytes = _g1->max_capacity(); - size_t committed_bytes = _g1->capacity(); - size_t uncommitted_bytes = reserved_bytes - committed_bytes; - size_t expand_bytes_via_pct = - uncommitted_bytes * G1ExpandByPercentOfAvailable / 100; - double scale_factor = 1.0; - - // If the current size is less than 1/4 of the Initial heap size, expand - // by half of the delta between the current and Initial sizes. IE, grow - // back quickly. - // - // Otherwise, take the current size, or G1ExpandByPercentOfAvailable % of - // the available expansion space, whichever is smaller, as the base - // expansion size. Then possibly scale this size according to how much the - // threshold has (on average) been exceeded by. If the delta is small - // (less than the StartScaleDownAt value), scale the size down linearly, but - // not by less than MinScaleDownFactor. If the delta is large (greater than - // the StartScaleUpAt value), scale up, but adding no more than MaxScaleUpFactor - // times the base size. The scaling will be linear in the range from - // StartScaleUpAt to (StartScaleUpAt + ScaleUpRange). In other words, - // ScaleUpRange sets the rate of scaling up. 
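The code that follows implements this as a piecewise-linear function of the delta between observed overhead and the threshold: scaled down (floored at MinScaleDownFactor) below StartScaleDownAt, 1.0 in the middle band, scaled up (capped at MaxScaleUpFactor) above StartScaleUpAt. A condensed restatement (sketch, assuming the constants named in the comment above):

#include <algorithm>

// Piecewise-linear expansion scale factor; 'delta' is how far the GC
// overhead ratio exceeds the expansion threshold.
static double expansion_scale_factor(double delta,
                                     double start_scale_down_at,
                                     double start_scale_up_at,
                                     double scale_up_range) {
  const double min_scale_down_factor = 0.2;
  const double max_scale_up_factor = 2.0;
  if (delta < start_scale_down_at) {
    return std::max(delta / start_scale_down_at, min_scale_down_factor);
  } else if (delta > start_scale_up_at) {
    return std::min(1.0 + (delta - start_scale_up_at) / scale_up_range,
                    max_scale_up_factor);
  }
  return 1.0;
}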
-    if (committed_bytes < InitialHeapSize / 4) { -      expand_bytes = (InitialHeapSize - committed_bytes) / 2; -    } else { -      double const MinScaleDownFactor = 0.2; -      double const MaxScaleUpFactor = 2; -      double const StartScaleDownAt = _gc_overhead_perc; -      double const StartScaleUpAt = _gc_overhead_perc * 1.5; -      double const ScaleUpRange = _gc_overhead_perc * 2.0; - -      double ratio_delta; -      if (filled_history_buffer) { -        ratio_delta = recent_gc_overhead - threshold; -      } else { -        ratio_delta = (_ratio_over_threshold_sum/_ratio_over_threshold_count) - threshold; -      } - -      expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes); -      if (ratio_delta < StartScaleDownAt) { -        scale_factor = ratio_delta / StartScaleDownAt; -        scale_factor = MAX2(scale_factor, MinScaleDownFactor); -      } else if (ratio_delta > StartScaleUpAt) { -        scale_factor = 1 + ((ratio_delta - StartScaleUpAt) / ScaleUpRange); -        scale_factor = MIN2(scale_factor, MaxScaleUpFactor); -      } -    } - -    log_debug(gc, ergo, heap)("Attempt heap expansion (recent GC overhead higher than threshold after GC) " -                              "recent GC overhead: %1.2f %% threshold: %1.2f %% uncommitted: " SIZE_FORMAT "B base expansion amount and scale: " SIZE_FORMAT "B (%1.2f%%)", -                              recent_gc_overhead, threshold, uncommitted_bytes, expand_bytes, scale_factor * 100); - -    expand_bytes = static_cast<size_t>(expand_bytes * scale_factor); - -    // Ensure the expansion size is at least the minimum growth amount -    // and at most the remaining uncommitted byte size. -    expand_bytes = MAX2(expand_bytes, min_expand_bytes); -    expand_bytes = MIN2(expand_bytes, uncommitted_bytes); - -    clear_ratio_check_data(); -  } else { -    // An expansion was not triggered. If we've started counting, increment -    // the number of checks we've made in the current window. If we've -    // reached the end of the window without resizing, clear the counters to -    // start again the next time we see a ratio above the threshold. -    if (_ratio_over_threshold_count > 0) { -      _pauses_since_start++; -      if (_pauses_since_start > NumPrevPausesForHeuristics) { -        clear_ratio_check_data(); -      } -    } -  } - -  return expand_bytes; -} - -void G1CollectorPolicy::print_yg_surv_rate_info() const { -#ifndef PRODUCT -  _short_lived_surv_rate_group->print_surv_rate_summary(); -  // add this call for any other surv rate groups -#endif // PRODUCT -} - -bool G1CollectorPolicy::is_young_list_full() const { -  uint young_list_length = _g1->young_list()->length(); -  uint young_list_target_length = _young_list_target_length; -  return young_list_length >= young_list_target_length; -} - -bool G1CollectorPolicy::can_expand_young_list() const { -  uint young_list_length = _g1->young_list()->length(); -  uint young_list_max_length = _young_list_max_length; -  return young_list_length < young_list_max_length; -} - -bool G1CollectorPolicy::adaptive_young_list_length() const { -  return _young_gen_sizer->adaptive_young_list_length(); -} - -void G1CollectorPolicy::update_max_gc_locker_expansion() { -  uint expansion_region_num = 0; -  if (GCLockerEdenExpansionPercent > 0) { -    double perc = (double) GCLockerEdenExpansionPercent / 100.0; -    double expansion_region_num_d = perc * (double) _young_list_target_length; -    // We use ceiling so that if expansion_region_num_d is > 0.0 (but -    // less than 1.0) we'll get 1.
- expansion_region_num = (uint) ceil(expansion_region_num_d); - } else { - assert(expansion_region_num == 0, "sanity"); - } - _young_list_max_length = _young_list_target_length + expansion_region_num; - assert(_young_list_target_length <= _young_list_max_length, "post-condition"); -} - -// Calculates survivor space parameters. -void G1CollectorPolicy::update_survivors_policy() { - double max_survivor_regions_d = - (double) _young_list_target_length / (double) SurvivorRatio; - // We use ceiling so that if max_survivor_regions_d is > 0.0 (but - // smaller than 1.0) we'll get 1. - _max_survivor_regions = (uint) ceil(max_survivor_regions_d); - - _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold( - HeapRegion::GrainWords * _max_survivor_regions, counters()); -} - -bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) { - // We actually check whether we are marking here and not if we are in a - // reclamation phase. This means that we will schedule a concurrent mark - // even while we are still in the process of reclaiming memory. - bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle(); - if (!during_cycle) { - log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause)); - collector_state()->set_initiate_conc_mark_if_possible(true); - return true; - } else { - log_debug(gc, ergo)("Do not request concurrent cycle initiation (concurrent cycle already in progress). GC cause: %s", GCCause::to_string(gc_cause)); - return false; - } -} - -void G1CollectorPolicy::initiate_conc_mark() { - collector_state()->set_during_initial_mark_pause(true); - collector_state()->set_initiate_conc_mark_if_possible(false); -} - -void G1CollectorPolicy::decide_on_conc_mark_initiation() { - // We are about to decide on whether this pause will be an - // initial-mark pause. - - // First, collector_state()->during_initial_mark_pause() should not be already set. We - // will set it here if we have to. However, it should be cleared by - // the end of the pause (it's only set for the duration of an - // initial-mark pause). - assert(!collector_state()->during_initial_mark_pause(), "pre-condition"); - - if (collector_state()->initiate_conc_mark_if_possible()) { - // We had noticed on a previous pause that the heap occupancy has - // gone over the initiating threshold and we should start a - // concurrent marking cycle. So we might initiate one. - - if (!about_to_start_mixed_phase() && collector_state()->gcs_are_young()) { - // Initiate a new initial mark if there is no marking or reclamation going on. - initiate_conc_mark(); - log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)"); - } else if (_g1->is_user_requested_concurrent_full_gc(_g1->gc_cause())) { - // Initiate a user requested initial mark. An initial mark must be young only - // GC, so the collector state must be updated to reflect this. - collector_state()->set_gcs_are_young(true); - collector_state()->set_last_young_gc(false); - - abort_time_to_mixed_tracking(); - initiate_conc_mark(); - log_debug(gc, ergo)("Initiate concurrent cycle (user requested concurrent cycle)"); - } else { - // The concurrent marking thread is still finishing up the - // previous cycle. If we start one right now the two cycles - // overlap. In particular, the concurrent marking thread might - // be in the process of clearing the next marking bitmap (which - // we will use for the next cycle if we start one). 
Starting a - // cycle now will be bad given that parts of the marking - // information might get cleared by the marking thread. And we - // cannot wait for the marking thread to finish the cycle as it - // periodically yields while clearing the next marking bitmap - // and, if it's in a yield point, it's waiting for us to - // finish. So, at this point we will not start a cycle and we'll - // let the concurrent marking thread complete the last one. - log_debug(gc, ergo)("Do not initiate concurrent cycle (concurrent cycle already in progress)"); - } - } -} - -class ParKnownGarbageHRClosure: public HeapRegionClosure { - G1CollectedHeap* _g1h; - CSetChooserParUpdater _cset_updater; - -public: - ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted, - uint chunk_size) : - _g1h(G1CollectedHeap::heap()), - _cset_updater(hrSorted, true /* parallel */, chunk_size) { } - - bool doHeapRegion(HeapRegion* r) { - // Do we have any marking information for this region? - if (r->is_marked()) { - // We will skip any region that's currently used as an old GC - // alloc region (we should not consider those for collection - // before we fill them up). - if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) { - _cset_updater.add_region(r); - } - } - return false; - } -}; - -class ParKnownGarbageTask: public AbstractGangTask { - CollectionSetChooser* _hrSorted; - uint _chunk_size; - G1CollectedHeap* _g1; - HeapRegionClaimer _hrclaimer; - -public: - ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size, uint n_workers) : - AbstractGangTask("ParKnownGarbageTask"), - _hrSorted(hrSorted), _chunk_size(chunk_size), - _g1(G1CollectedHeap::heap()), _hrclaimer(n_workers) {} - - void work(uint worker_id) { - ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size); - _g1->heap_region_par_iterate(&parKnownGarbageCl, worker_id, &_hrclaimer); - } -}; - -uint G1CollectorPolicy::calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const { - assert(n_workers > 0, "Active gc workers should be greater than 0"); - const uint overpartition_factor = 4; - const uint min_chunk_size = MAX2(n_regions / n_workers, 1U); - return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size); -} - -void G1CollectorPolicy::record_concurrent_mark_cleanup_end() { - cset_chooser()->clear(); - - WorkGang* workers = _g1->workers(); - uint n_workers = workers->active_workers(); - - uint n_regions = _g1->num_regions(); - uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions); - cset_chooser()->prepare_for_par_region_addition(n_workers, n_regions, chunk_size); - ParKnownGarbageTask par_known_garbage_task(cset_chooser(), chunk_size, n_workers); - workers->run_task(&par_known_garbage_task); - - cset_chooser()->sort_regions(); - - double end_sec = os::elapsedTime(); - double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0; - _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms); - _prev_collection_pause_end_ms += elapsed_time_ms; - - record_pause(Cleanup, _mark_cleanup_start_sec, end_sec); -} - -double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) const { - // Returns the given amount of reclaimable bytes (that represents - // the amount of reclaimable space still to be collected) as a - // percentage of the current heap capacity. 
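calculate_parallel_work_chunk_size() above overpartitions the regions by a factor of four but never hands out chunks smaller than an even regions-per-worker split. The same arithmetic in isolation (sketch, assuming n_workers > 0 as the source asserts):

#include <algorithm>
#include <cstdint>

// Chunk size for claiming regions in ParKnownGarbageTask: overpartition by
// 4x for load balancing, floored at an even split (minimum one region).
static uint32_t parallel_chunk_size(uint32_t n_workers, uint32_t n_regions) {
  const uint32_t overpartition_factor = 4;
  uint32_t min_chunk = std::max<uint32_t>(n_regions / n_workers, 1);
  return std::max(n_regions / (n_workers * overpartition_factor), min_chunk);
}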
- size_t capacity_bytes = _g1->capacity(); - return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes; -} - -void G1CollectorPolicy::maybe_start_marking() { - if (need_to_start_conc_mark("end of GC")) { - // Note: this might have already been set, if during the last - // pause we decided to start a cycle but at the beginning of - // this pause we decided to postpone it. That's OK. - collector_state()->set_initiate_conc_mark_if_possible(true); - } -} - -G1CollectorPolicy::PauseKind G1CollectorPolicy::young_gc_pause_kind() const { - assert(!collector_state()->full_collection(), "must be"); - if (collector_state()->during_initial_mark_pause()) { - assert(collector_state()->last_gc_was_young(), "must be"); - assert(!collector_state()->last_young_gc(), "must be"); - return InitialMarkGC; - } else if (collector_state()->last_young_gc()) { - assert(!collector_state()->during_initial_mark_pause(), "must be"); - assert(collector_state()->last_gc_was_young(), "must be"); - return LastYoungGC; - } else if (!collector_state()->last_gc_was_young()) { - assert(!collector_state()->during_initial_mark_pause(), "must be"); - assert(!collector_state()->last_young_gc(), "must be"); - return MixedGC; - } else { - assert(collector_state()->last_gc_was_young(), "must be"); - assert(!collector_state()->during_initial_mark_pause(), "must be"); - assert(!collector_state()->last_young_gc(), "must be"); - return YoungOnlyGC; - } -} - -void G1CollectorPolicy::record_pause(PauseKind kind, double start, double end) { - // Manage the MMU tracker. For some reason it ignores Full GCs. - if (kind != FullGC) { - _mmu_tracker->add_pause(start, end); - } - // Manage the mutator time tracking from initial mark to first mixed gc. - switch (kind) { - case FullGC: - abort_time_to_mixed_tracking(); - break; - case Cleanup: - case Remark: - case YoungOnlyGC: - case LastYoungGC: - _initial_mark_to_mixed.add_pause(end - start); - break; - case InitialMarkGC: - _initial_mark_to_mixed.record_initial_mark_end(end); - break; - case MixedGC: - _initial_mark_to_mixed.record_mixed_gc_start(start); - break; - default: - ShouldNotReachHere(); - } -} - -void G1CollectorPolicy::abort_time_to_mixed_tracking() { - _initial_mark_to_mixed.reset(); -} - -bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str, - const char* false_action_str) const { - if (cset_chooser()->is_empty()) { - log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str); - return false; - } - - // Is the amount of uncollected reclaimable space above G1HeapWastePercent? - size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes(); - double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes); - double threshold = (double) G1HeapWastePercent; - if (reclaimable_perc <= threshold) { - log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT, - false_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent); - return false; - } - log_debug(gc, ergo)("%s (candidate old regions available). 
candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT, - true_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent); - return true; -} - -uint G1CollectorPolicy::calc_min_old_cset_length() const { - // The min old CSet region bound is based on the maximum desired - // number of mixed GCs after a cycle. I.e., even if some old regions - // look expensive, we should add them to the CSet anyway to make - // sure we go through the available old regions in no more than the - // maximum desired number of mixed GCs. - // - // The calculation is based on the number of marked regions we added - // to the CSet chooser in the first place, not how many remain, so - // that the result is the same during all mixed GCs that follow a cycle. - - const size_t region_num = (size_t) cset_chooser()->length(); - const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1); - size_t result = region_num / gc_num; - // emulate ceiling - if (result * gc_num < region_num) { - result += 1; - } - return (uint) result; -} - -uint G1CollectorPolicy::calc_max_old_cset_length() const { - // The max old CSet region bound is based on the threshold expressed - // as a percentage of the heap size. I.e., it should bound the - // number of old regions added to the CSet irrespective of how many - // of them are available. - - const G1CollectedHeap* g1h = G1CollectedHeap::heap(); - const size_t region_num = g1h->num_regions(); - const size_t perc = (size_t) G1OldCSetRegionThresholdPercent; - size_t result = region_num * perc / 100; - // emulate ceiling - if (100 * result < region_num * perc) { - result += 1; - } - return (uint) result; -} - -void G1CollectorPolicy::finalize_collection_set(double target_pause_time_ms) { - double time_remaining_ms = _collection_set->finalize_young_part(target_pause_time_ms); - _collection_set->finalize_old_part(time_remaining_ms); -} - diff --git a/src/share/vm/gc/g1/g1CollectorPolicy.hpp b/src/share/vm/gc/g1/g1Measurements.hpp copy from src/share/vm/gc/g1/g1CollectorPolicy.hpp copy to src/share/vm/gc/g1/g1Measurements.hpp --- a/src/share/vm/gc/g1/g1CollectorPolicy.hpp +++ b/src/share/vm/gc/g1/g1Measurements.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,51 +22,19 @@ * */ -#ifndef SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP -#define SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP +#ifndef SHARE_VM_GC_G1_G1MEASUREMENTS_HPP +#define SHARE_VM_GC_G1_G1MEASUREMENTS_HPP -#include "gc/g1/g1CollectorState.hpp" -#include "gc/g1/g1GCPhaseTimes.hpp" -#include "gc/g1/g1InCSetState.hpp" -#include "gc/g1/g1InitialMarkToMixedTimeTracker.hpp" -#include "gc/g1/g1MMUTracker.hpp" -#include "gc/g1/g1Predictions.hpp" -#include "gc/shared/collectorPolicy.hpp" -#include "utilities/pair.hpp" +#include "memory/allocation.hpp" +#include "utilities/globalDefinitions.hpp" -// A G1CollectorPolicy makes policy decisions that determine the -// characteristics of the collector. Examples include: -// * choice of collection set. -// * when to collect. 
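Both calc_min_old_cset_length() and calc_max_old_cset_length() above emulate ceiling division with a multiply-and-compare; for positive integers the usual closed form is equivalent. A sketch (hypothetical helper, not part of this patch):

#include <cstddef>

// Ceiling of a/b for positive integers; matches the 'result += 1 when
// result * b < a' pattern used by the two calc_*_old_cset_length() methods.
static size_t ceil_div(size_t a, size_t b) {
  return (a + b - 1) / b;
}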
+class TruncatedSeq; +class G1Predictions; -class HeapRegion; -class G1CollectionSet; -class CollectionSetChooser; -class G1IHOPControl; -class G1YoungGenSizer; - -class G1CollectorPolicy: public CollectorPolicy { - private: -  G1IHOPControl* _ihop_control; - -  G1IHOPControl* create_ihop_control() const; -  // Update the IHOP control with necessary statistics. -  void update_ihop_prediction(double mutator_time_s, -                              size_t mutator_alloc_bytes, -                              size_t young_gen_size); -  void report_ihop_statistics(); - -  G1Predictions _predictor; - -  double get_new_prediction(TruncatedSeq const* seq) const; -  size_t get_new_size_prediction(TruncatedSeq const* seq) const; - -  G1MMUTracker* _mmu_tracker; - -  void initialize_alignments(); -  void initialize_flags(); - -  double _full_collection_start_sec; +class G1Measurements: public CHeapObj<mtGC> { +  const static int TruncatedSeqLength = 10; +  const static int NumPrevPausesForHeuristics = 10; +  G1Predictions* _predictor; // These exclude marking times. TruncatedSeq* _recent_gc_times_ms; @@ -74,36 +42,6 @@ TruncatedSeq* _concurrent_mark_remark_times_ms; TruncatedSeq* _concurrent_mark_cleanup_times_ms; -  // Ratio check data for determining if heap growth is necessary. -  uint _ratio_over_threshold_count; -  double _ratio_over_threshold_sum; -  uint _pauses_since_start; - -  uint _young_list_target_length; -  uint _young_list_fixed_length; - -  // The max number of regions we can extend the eden by while the GC -  // locker is active. This should be >= _young_list_target_length; -  uint _young_list_max_length; - -  SurvRateGroup* _short_lived_surv_rate_group; -  SurvRateGroup* _survivor_surv_rate_group; -  // add here any more surv rate groups - -  double _gc_overhead_perc; - -  double _reserve_factor; -  uint _reserve_regions; - -  enum PredictionConstants { -    TruncatedSeqLength = 10, -    NumPrevPausesForHeuristics = 10, -    // MinOverThresholdForGrowth must be less than NumPrevPausesForHeuristics, -    // representing the minimum number of pause time ratios that exceed -    // GCTimeRatio before a heap expansion will be triggered. -    MinOverThresholdForGrowth = 4 -  }; - TruncatedSeq* _alloc_rate_ms_seq; double _prev_collection_pause_end_ms; @@ -124,64 +62,56 @@ TruncatedSeq* _cost_per_byte_ms_during_cm_seq; -  G1YoungGenSizer* _young_gen_sizer; +  // Statistics kept per GC stoppage, pause or full. +  TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec; -  uint _free_regions_at_end_of_collection; +  // The ratio of gc time to elapsed time, computed over recent pauses, +  // and the ratio for just the last pause. +  double _recent_avg_pause_time_ratio; +  double _last_pause_time_ratio; -  size_t _max_rs_lengths; +  double get_new_prediction(TruncatedSeq const* seq) const; +  size_t get_new_size_prediction(TruncatedSeq const* seq) const; -  size_t _rs_lengths_prediction; +  void compute_pause_time_ratio(double interval_ms, double pause_time_ms); +public: +  G1Measurements(G1Predictions* predictor); -#ifndef PRODUCT -  bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group); -#endif // PRODUCT - -  void adjust_concurrent_refinement(double update_rs_time, -                                    double update_rs_processed_buffers, -                                    double goal_ms); - -  double _pause_time_target_ms; - -  size_t _pending_cards; - -  // The amount of allocated bytes in old gen during the last mutator and the following -  // young GC phase.
- size_t _bytes_allocated_in_old_since_last_gc; - - G1InitialMarkToMixedTimeTracker _initial_mark_to_mixed; -public: - const G1Predictions& predictor() const { return _predictor; } - - // Add the given number of bytes to the total number of allocated bytes in the old gen. - void add_bytes_allocated_in_old_since_last_gc(size_t bytes) { _bytes_allocated_in_old_since_last_gc += bytes; } - - // Accessors - - void set_region_eden(HeapRegion* hr, int young_index_in_cset) { - hr->set_eden(); - hr->install_surv_rate_group(_short_lived_surv_rate_group); - hr->set_young_index_in_cset(young_index_in_cset); + double prev_collection_pause_end_ms() const { + return _prev_collection_pause_end_ms; } - void set_region_survivor(HeapRegion* hr, int young_index_in_cset) { - assert(hr->is_survivor(), "pre-condition"); - hr->install_surv_rate_group(_survivor_surv_rate_group); - hr->set_young_index_in_cset(young_index_in_cset); + double recent_avg_pause_time_ratio() const { + return _recent_avg_pause_time_ratio; } -#ifndef PRODUCT - bool verify_young_ages(); -#endif // PRODUCT - - void record_max_rs_lengths(size_t rs_lengths) { - _max_rs_lengths = rs_lengths; + double last_pause_time_ratio() const { + return _last_pause_time_ratio; } - size_t predict_rs_lengths() const; + void append_prev_collection_pause_end_ms(double ms) { + _prev_collection_pause_end_ms += ms; + } + + void report_concurrent_mark_remark_times_ms(double ms); + void report_concurrent_mark_cleanup_times_ms(double ms); + void report_alloc_rate_ms(double alloc_rate); + void report_cost_per_card_ms(double cost_per_card_ms); + void report_cost_scan_hcc(double cost_scan_hcc); + void report_cost_per_entry_ms(double cost_per_entry_ms, bool last_gc_was_young); + void report_cards_per_entry_ratio(double cards_per_entry_ratio, bool last_gc_was_young); + void report_rs_length_diff(double rs_length_diff); + void report_cost_per_byte_ms(double cost_per_byte_ms, bool in_marking_window); + void report_young_other_cost_per_region_ms(double other_cost_per_region_ms); + void report_non_young_other_cost_per_region_ms(double other_cost_per_region_ms); + void report_constant_other_time_ms(double constant_other_time_ms); + void report_pending_cards(double pending_cards); + void report_rs_lengths(double rs_lengths); size_t predict_rs_length_diff() const; double predict_alloc_rate_ms() const; + int num_alloc_rate_ms() const; double predict_cost_per_card_ms() const; @@ -193,17 +123,15 @@ double predict_mixed_cards_per_entry_ratio() const; - size_t predict_young_card_num(size_t rs_length) const; + size_t predict_card_num(size_t rs_length, bool gcs_are_young) const; - size_t predict_non_young_card_num(size_t rs_length) const; - - double predict_rs_scan_time_ms(size_t card_num) const; + double predict_rs_scan_time_ms(size_t card_num, bool gcs_are_young) const; double predict_mixed_rs_scan_time_ms(size_t card_num) const; double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const; - double predict_object_copy_time_ms(size_t bytes_to_copy) const; + double predict_object_copy_time_ms(size_t bytes_to_copy, bool during_concurrent_mark) const; double predict_constant_other_time_ms() const; @@ -211,360 +139,15 @@ double predict_non_young_other_time_ms(size_t non_young_num) const; - double predict_base_elapsed_time_ms(size_t pending_cards) const; - double predict_base_elapsed_time_ms(size_t pending_cards, - size_t scanned_cards) const; - size_t predict_bytes_to_copy(HeapRegion* hr) const; - double predict_region_elapsed_time_ms(HeapRegion* hr, bool for_young_gc) const; 
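The new class splits its interface into report_*() methods that feed normalized samples in and predict_*() methods that read smoothed estimates back out. A schematic of the intended call pattern, using only declarations from this header (a sketch under that assumption, not code from the patch):

// After a pause: feed the normalized sample in, then ask for the next estimate.
static double sketch_update_rs_prediction(G1Measurements* m,
                                          double update_rs_ms,
                                          size_t pending_cards) {
  if (pending_cards > 0) {
    m->report_cost_per_card_ms(update_rs_ms / (double) pending_cards);
  }
  m->report_pending_cards((double) pending_cards);
  return m->predict_rs_update_time_ms(m->predict_pending_cards());
}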
- - double predict_survivor_regions_evac_time() const; - - bool should_update_surv_rate_group_predictors() { - return collector_state()->last_gc_was_young() && !collector_state()->in_marking_window(); - } - - void cset_regions_freed() { - bool update = should_update_surv_rate_group_predictors(); - - _short_lived_surv_rate_group->all_surviving_words_recorded(update); - _survivor_surv_rate_group->all_surviving_words_recorded(update); - } - - G1MMUTracker* mmu_tracker() { - return _mmu_tracker; - } - - const G1MMUTracker* mmu_tracker() const { - return _mmu_tracker; - } - - double max_pause_time_ms() const { - return _mmu_tracker->max_gc_time() * 1000.0; - } - double predict_remark_time_ms() const; double predict_cleanup_time_ms() const; - // Returns an estimate of the survival rate of the region at yg-age - // "yg_age". - double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const; - - double predict_yg_surv_rate(int age) const; - - double accum_yg_surv_rate_pred(int age) const; - -protected: - G1CollectionSet* _collection_set; - virtual double average_time_ms(G1GCPhaseTimes::GCParPhases phase) const; - virtual double other_time_ms(double pause_time_ms) const; - - double young_other_time_ms() const; - double non_young_other_time_ms() const; - double constant_other_time_ms(double pause_time_ms) const; - - CollectionSetChooser* cset_chooser() const; -private: - // Statistics kept per GC stoppage, pause or full. - TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec; + size_t predict_rs_lengths() const; + size_t predict_pending_cards() const; // Add a new GC of the given duration and end time to the record. void update_recent_gc_times(double end_time_sec, double elapsed_ms); - - // The number of bytes copied during the GC. - size_t _bytes_copied_during_gc; - - // Stash a pointer to the g1 heap. - G1CollectedHeap* _g1; - - G1GCPhaseTimes* _phase_times; - - // The ratio of gc time to elapsed time, computed over recent pauses, - // and the ratio for just the last pause. - double _recent_avg_pause_time_ratio; - double _last_pause_time_ratio; - - double recent_avg_pause_time_ratio() const { - return _recent_avg_pause_time_ratio; - } - - // This set of variables tracks the collector efficiency, in order to - // determine whether we should initiate a new marking. - double _mark_remark_start_sec; - double _mark_cleanup_start_sec; - - // Updates the internal young list maximum and target lengths. Returns the - // unbounded young list target length. - uint update_young_list_max_and_target_length(); - uint update_young_list_max_and_target_length(size_t rs_lengths); - - // Update the young list target length either by setting it to the - // desired fixed value or by calculating it using G1's pause - // prediction model. If no rs_lengths parameter is passed, predict - // the RS lengths using the prediction model, otherwise use the - // given rs_lengths as the prediction. - // Returns the unbounded young list target length. - uint update_young_list_target_length(size_t rs_lengths); - - // Calculate and return the minimum desired young list target - // length. This is the minimum desired young list length according - // to the user's inputs. - uint calculate_young_list_desired_min_length(uint base_min_length) const; - - // Calculate and return the maximum desired young list target - // length. This is the maximum desired young list length according - // to the user's inputs. 
-  uint calculate_young_list_desired_max_length() const;
-
-  // Calculate and return the maximum young list target length that
-  // can fit into the pause time goal. The parameters are: rs_lengths
-  // represent the prediction of how large the young RSet lengths will
-  // be, base_min_length is the already existing number of regions in
-  // the young list, min_length and max_length are the desired min and
-  // max young list length according to the user's inputs.
-  uint calculate_young_list_target_length(size_t rs_lengths,
-                                          uint base_min_length,
-                                          uint desired_min_length,
-                                          uint desired_max_length) const;
-
-  // Result of the bounded_young_list_target_length() method, containing both the
-  // bounded as well as the unbounded young list target lengths in this order.
-  typedef Pair<uint, uint> YoungTargetLengths;
-  YoungTargetLengths young_list_target_lengths(size_t rs_lengths) const;
-
-  void update_rs_lengths_prediction();
-  void update_rs_lengths_prediction(size_t prediction);
-
-  // Calculate and return chunk size (in number of regions) for parallel
-  // concurrent mark cleanup.
-  uint calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const;
-
-  // Check whether a given young length (young_length) fits into the
-  // given target pause time and whether the prediction for the amount
-  // of objects to be copied for the given length will fit into the
-  // given free space (expressed by base_free_regions). It is used by
-  // calculate_young_list_target_length().
-  bool predict_will_fit(uint young_length, double base_time_ms,
-                        uint base_free_regions, double target_pause_time_ms) const;
-
-public:
-  size_t pending_cards() const { return _pending_cards; }
-
-  // Calculate the minimum number of old regions we'll add to the CSet
-  // during a mixed GC.
-  uint calc_min_old_cset_length() const;
-
-  // Calculate the maximum number of old regions we'll add to the CSet
-  // during a mixed GC.
-  uint calc_max_old_cset_length() const;
-
-  // Returns the given amount of uncollected reclaimable space
-  // as a percentage of the current heap capacity.
-  double reclaimable_bytes_perc(size_t reclaimable_bytes) const;
-
-private:
-  // Sets up marking if proper conditions are met.
-  void maybe_start_marking();
-
-  // The kind of STW pause.
-  enum PauseKind {
-    FullGC,
-    YoungOnlyGC,
-    MixedGC,
-    LastYoungGC,
-    InitialMarkGC,
-    Cleanup,
-    Remark
-  };
-
-  // Calculate PauseKind from internal state.
-  PauseKind young_gc_pause_kind() const;
-  // Record the given STW pause with the given start and end times (in s).
-  void record_pause(PauseKind kind, double start, double end);
-  // Indicate that we aborted marking before doing any mixed GCs.
-  void abort_time_to_mixed_tracking();
-public:
-
-  G1CollectorPolicy();
-
-  virtual ~G1CollectorPolicy();
-
-  virtual G1CollectorPolicy* as_g1_policy() { return this; }
-
-  G1CollectorState* collector_state() const;
-
-  G1GCPhaseTimes* phase_times() const { return _phase_times; }
-
-  // Check the current value of the young list RSet lengths and
-  // compare it against the last prediction. If the current value is
-  // higher, recalculate the young list target length prediction.
-  void revise_young_list_target_length_if_necessary(size_t rs_lengths);
-
-  // This should be called after the heap is resized.
-  void record_new_heap_size(uint new_number_of_regions);
-
-  void init();
-
-  virtual void note_gc_start(uint num_active_workers);
-
-  // Create jstat counters for the policy.
-  virtual void initialize_gc_policy_counters();
-
-  bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);
-
-  bool about_to_start_mixed_phase() const;
-
-  // Record the start and end of an evacuation pause.
-  void record_collection_pause_start(double start_time_sec);
-  void record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc);
-
-  // Record the start and end of a full collection.
-  void record_full_collection_start();
-  void record_full_collection_end();
-
-  // Must currently be called while the world is stopped.
-  void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);
-
-  // Record start and end of remark.
-  void record_concurrent_mark_remark_start();
-  void record_concurrent_mark_remark_end();
-
-  // Record start, end, and completion of cleanup.
-  void record_concurrent_mark_cleanup_start();
-  void record_concurrent_mark_cleanup_end();
-  void record_concurrent_mark_cleanup_completed();
-
-  virtual void print_phases();
-
-  // Record how much space we copied during a GC. This is typically
-  // called when a GC alloc region is being retired.
-  void record_bytes_copied_during_gc(size_t bytes) {
-    _bytes_copied_during_gc += bytes;
-  }
-
-  // The amount of space we copied during a GC.
-  size_t bytes_copied_during_gc() const {
-    return _bytes_copied_during_gc;
-  }
-
-  // Determine whether there are candidate regions so that the
-  // next GC should be mixed. The two action strings are used
-  // in the ergo output when the method returns true or false.
-  bool next_gc_should_be_mixed(const char* true_action_str,
-                               const char* false_action_str) const;
-
-  virtual void finalize_collection_set(double target_pause_time_ms);
-private:
-  // Set the state to start a concurrent marking cycle and clear
-  // _initiate_conc_mark_if_possible because it has now been
-  // acted on.
-  void initiate_conc_mark();
-
-public:
-  // This sets the initiate_conc_mark_if_possible() flag to start a
-  // new cycle, as long as we are not already in one. It's best if it
-  // is called during a safepoint when the test whether a cycle is in
-  // progress or not is stable.
-  bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);
-
-  // This is called at the very beginning of an evacuation pause (it
-  // has to be the first thing that the pause does). If
-  // initiate_conc_mark_if_possible() is true, and the concurrent
-  // marking thread has completed its work during the previous cycle,
-  // it will set during_initial_mark_pause() to so that the pause does
-  // the initial-mark work and start a marking cycle.
-  void decide_on_conc_mark_initiation();
-
-  // If an expansion would be appropriate, because recent GC overhead had
-  // exceeded the desired limit, return an amount to expand by.
-  virtual size_t expansion_amount();
-
-  // Clear ratio tracking data used by expansion_amount().
-  void clear_ratio_check_data();
-
-  // Print stats on young survival ratio
-  void print_yg_surv_rate_info() const;
-
-  void finished_recalculating_age_indexes(bool is_survivors) {
-    if (is_survivors) {
-      _survivor_surv_rate_group->finished_recalculating_age_indexes();
-    } else {
-      _short_lived_surv_rate_group->finished_recalculating_age_indexes();
-    }
-    // do that for any other surv rate groups
-  }
-
-  size_t young_list_target_length() const { return _young_list_target_length; }
-
-  bool is_young_list_full() const;
-
-  bool can_expand_young_list() const;
-
-  uint young_list_max_length() const {
-    return _young_list_max_length;
-  }
-
-  bool adaptive_young_list_length() const;
-
-  virtual bool should_process_references() const {
-    return true;
-  }
-
-private:
-  //
-  // Survivor regions policy.
-  //
-
-  // Current tenuring threshold, set to 0 if the collector reaches the
-  // maximum amount of survivors regions.
-  uint _tenuring_threshold;
-
-  // The limit on the number of regions allocated for survivors.
-  uint _max_survivor_regions;
-
-  AgeTable _survivors_age_table;
-
-public:
-  uint tenuring_threshold() const { return _tenuring_threshold; }
-
-  uint max_survivor_regions() {
-    return _max_survivor_regions;
-  }
-
-  static const uint REGIONS_UNLIMITED = (uint) -1;
-
-  uint max_regions(InCSetState dest) const {
-    switch (dest.value()) {
-      case InCSetState::Young:
-        return _max_survivor_regions;
-      case InCSetState::Old:
-        return REGIONS_UNLIMITED;
-      default:
-        assert(false, "Unknown dest state: " CSETSTATE_FORMAT, dest.value());
-        break;
-    }
-    // keep some compilers happy
-    return 0;
-  }
-
-  void note_start_adding_survivor_regions() {
-    _survivor_surv_rate_group->start_adding_regions();
-  }
-
-  void note_stop_adding_survivor_regions() {
-    _survivor_surv_rate_group->stop_adding_regions();
-  }
-
-  void record_age_table(AgeTable* age_table) {
-    _survivors_age_table.merge(age_table);
-  }
-
-  void update_max_gc_locker_expansion();
-
-  // Calculates survivor space parameters.
-  void update_survivors_policy();
-
-  virtual void post_heap_initialize();
 };

-#endif // SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP
+#endif // SHARE_VM_GC_G1_G1MEASUREMENTS_HPP
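The first patch leaves G1CollectorPolicy delegating all of its cost bookkeeping to the new G1Measurements class: for every tracked quantity there is a report_* call that feeds a raw per-pause sample into a TruncatedSeq, and a matching predict_* call that hands back a smoothed estimate via G1Predictions. The standalone sketch below shows just that report/predict round trip. It is not HotSpot code: the *Lite types are illustrative stand-ins, the real TruncatedSeq keeps decaying averages and variance rather than the plain window used here, and the real G1Predictions additionally pads predictions for sequences with few samples.

// Standalone sketch (not HotSpot code) of the report_*/predict_* pattern
// that the patch above extracts into G1Measurements.
#include <cmath>
#include <cstdio>
#include <initializer_list>
#include <vector>

class TruncatedSeqLite {
  std::vector<double> _samples; // most recent sample at the back
  size_t _length;               // cf. TruncatedSeqLength = 10
public:
  explicit TruncatedSeqLite(size_t length) : _length(length) {}
  void add(double v) {
    if (_samples.size() == _length) _samples.erase(_samples.begin());
    _samples.push_back(v);
  }
  double avg() const {
    double s = 0.0;
    for (double v : _samples) s += v;
    return _samples.empty() ? 0.0 : s / _samples.size();
  }
  double sd() const {
    double a = avg(), s = 0.0;
    for (double v : _samples) s += (v - a) * (v - a);
    return _samples.empty() ? 0.0 : std::sqrt(s / _samples.size());
  }
};

// cf. G1Predictions: roughly "average plus confidence-scaled deviation",
// i.e. a deliberately pessimistic estimate so pauses tend to come in
// under the target rather than over it.
struct PredictionsLite {
  double _confidence; // cf. G1ConfidencePercent / 100.0 in the constructor above
  double get_new_prediction(const TruncatedSeqLite* seq) const {
    return seq->avg() + _confidence * seq->sd();
  }
};

class MeasurementsLite {
  const PredictionsLite* _predictor;
  TruncatedSeqLite _cost_per_card_ms_seq;
public:
  explicit MeasurementsLite(const PredictionsLite* p)
    : _predictor(p), _cost_per_card_ms_seq(10) {}
  // Policy code pushes raw per-pause observations in...
  void report_cost_per_card_ms(double ms) { _cost_per_card_ms_seq.add(ms); }
  // ...and asks for smoothed, pessimistic estimates back out.
  double predict_cost_per_card_ms() const {
    return _predictor->get_new_prediction(&_cost_per_card_ms_seq);
  }
};

int main() {
  PredictionsLite p{0.5};
  MeasurementsLite m(&p);
  for (double ms : {0.010, 0.012, 0.008, 0.011}) m.report_cost_per_card_ms(ms);
  std::printf("predicted cost/card: %.4f ms\n", m.predict_cost_per_card_ms());
}

The value of the split is visible even at this scale: the policy object only ever sees the two narrow entry points, so the sampling machinery can be renamed or reworked, as the next patch does, without touching the sizing logic.
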
# HG changeset patch
# User mgerdin
# Date 1458136643 -3600
#      Wed Mar 16 14:57:23 2016 +0100
# Node ID d2959347e44b1180958a69ba4eb4a0889dd5543c
# Parent  2861183ab82e4fdca18a84f06a2da28d4811402f
[mq]: rename-to-analytics

diff --git a/src/share/vm/gc/g1/concurrentMarkThread.cpp b/src/share/vm/gc/g1/concurrentMarkThread.cpp
--- a/src/share/vm/gc/g1/concurrentMarkThread.cpp
+++ b/src/share/vm/gc/g1/concurrentMarkThread.cpp
@@ -25,9 +25,9 @@
 #include "precompiled.hpp"
 #include "classfile/classLoaderData.hpp"
 #include "gc/g1/concurrentMarkThread.inline.hpp"
+#include "gc/g1/g1Analytics.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
-#include "gc/g1/g1Measurements.hpp"
 #include "gc/g1/g1MMUTracker.hpp"
 #include "gc/g1/suspendibleThreadSet.hpp"
 #include "gc/g1/vm_operations_g1.hpp"
@@ -83,11 +83,11 @@

 // Marking pauses can be scheduled flexibly, so we might delay marking to meet MMU.
 void ConcurrentMarkThread::delay_to_keep_mmu(G1CollectorPolicy* g1_policy, bool remark) {
-  const G1Measurements* measurements = g1_policy->measurements();
+  const G1Analytics* analytics = g1_policy->analytics();
   if (g1_policy->adaptive_young_list_length()) {
     double now = os::elapsedTime();
-    double prediction_ms = remark ? measurements->predict_remark_time_ms()
-                                  : measurements->predict_cleanup_time_ms();
+    double prediction_ms = remark ? analytics->predict_remark_time_ms()
+                                  : analytics->predict_cleanup_time_ms();
     G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker();
     jlong sleep_time_ms = mmu_tracker->when_ms(now, prediction_ms);
     os::sleep(this, sleep_time_ms, false);
diff --git a/src/share/vm/gc/g1/g1Measurements.cpp b/src/share/vm/gc/g1/g1Analytics.cpp
rename from src/share/vm/gc/g1/g1Measurements.cpp
rename to src/share/vm/gc/g1/g1Analytics.cpp
--- a/src/share/vm/gc/g1/g1Measurements.cpp
+++ b/src/share/vm/gc/g1/g1Analytics.cpp
@@ -23,7 +23,7 @@
  */

 #include "precompiled.hpp"
-#include "gc/g1/g1Measurements.hpp"
+#include "gc/g1/g1Analytics.hpp"
 #include "gc/g1/g1Predictions.hpp"
 #include "runtime/os.hpp"
 #include "utilities/debug.hpp"
@@ -69,7 +69,7 @@
   1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
 };

-G1Measurements::G1Measurements(G1Predictions* predictor) :
+G1Analytics::G1Analytics(const G1Predictions* predictor) :
     _predictor(predictor),
     _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
     _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
@@ -101,42 +101,39 @@
   _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
   _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
   _cost_scan_hcc_seq->add(0.0);
-  _young_cards_per_entry_ratio_seq->add(
-                                  young_cards_per_entry_ratio_defaults[index]);
+  _young_cards_per_entry_ratio_seq->add(young_cards_per_entry_ratio_defaults[index]);
   _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
   _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
   _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
-  _young_other_cost_per_region_ms_seq->add(
-                                 young_other_cost_per_region_ms_defaults[index]);
-  _non_young_other_cost_per_region_ms_seq->add(
-                             non_young_other_cost_per_region_ms_defaults[index]);
+  _young_other_cost_per_region_ms_seq->add(young_other_cost_per_region_ms_defaults[index]);
+  _non_young_other_cost_per_region_ms_seq->add(non_young_other_cost_per_region_ms_defaults[index]);

   // start conservatively (around 50ms is about right)
   _concurrent_mark_remark_times_ms->add(0.05);
   _concurrent_mark_cleanup_times_ms->add(0.20);
 }

-double G1Measurements::get_new_prediction(TruncatedSeq const* seq) const {
+double G1Analytics::get_new_prediction(TruncatedSeq const* seq) const {
   return _predictor->get_new_prediction(seq);
 }

-size_t G1Measurements::get_new_size_prediction(TruncatedSeq const* seq) const {
+size_t G1Analytics::get_new_size_prediction(TruncatedSeq const* seq) const {
   return (size_t)get_new_prediction(seq);
 }

-int G1Measurements::num_alloc_rate_ms() const {
+int G1Analytics::num_alloc_rate_ms() const {
   return _alloc_rate_ms_seq->num();
 }

-void G1Measurements::report_concurrent_mark_remark_times_ms(double ms) {
+void G1Analytics::report_concurrent_mark_remark_times_ms(double ms) {
   _concurrent_mark_remark_times_ms->add(ms);
 }

-void G1Measurements::report_alloc_rate_ms(double alloc_rate) {
+void G1Analytics::report_alloc_rate_ms(double alloc_rate) {
   _alloc_rate_ms_seq->add(alloc_rate);
 }

-void G1Measurements::compute_pause_time_ratio(double interval_ms, double pause_time_ms) {
+void G1Analytics::compute_pause_time_ratio(double interval_ms, double pause_time_ms) {
   _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum() / interval_ms;
   if (_recent_avg_pause_time_ratio < 0.0 ||
       (_recent_avg_pause_time_ratio - 1.0 > 0.0)) {
@@ -159,15 +156,15 @@
     (pause_time_ms * _recent_prev_end_times_for_all_gcs_sec->num()) / interval_ms;
 }

-void G1Measurements::report_cost_per_card_ms(double cost_per_card_ms) {
+void G1Analytics::report_cost_per_card_ms(double cost_per_card_ms) {
   _cost_per_card_ms_seq->add(cost_per_card_ms);
 }

-void G1Measurements::report_cost_scan_hcc(double cost_scan_hcc) {
+void G1Analytics::report_cost_scan_hcc(double cost_scan_hcc) {
   _cost_scan_hcc_seq->add(cost_scan_hcc);
 }

-void G1Measurements::report_cost_per_entry_ms(double cost_per_entry_ms, bool last_gc_was_young) {
+void G1Analytics::report_cost_per_entry_ms(double cost_per_entry_ms, bool last_gc_was_young) {
   if (last_gc_was_young) {
     _cost_per_entry_ms_seq->add(cost_per_entry_ms);
   } else {
@@ -175,7 +172,7 @@
   }
 }

-void G1Measurements::report_cards_per_entry_ratio(double cards_per_entry_ratio, bool last_gc_was_young) {
+void G1Analytics::report_cards_per_entry_ratio(double cards_per_entry_ratio, bool last_gc_was_young) {
   if (last_gc_was_young) {
     _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
   } else {
@@ -183,11 +180,11 @@
   }
 }

-void G1Measurements::report_rs_length_diff(double rs_length_diff) {
+void G1Analytics::report_rs_length_diff(double rs_length_diff) {
   _rs_length_diff_seq->add(rs_length_diff);
 }

-void G1Measurements::report_cost_per_byte_ms(double cost_per_byte_ms, bool in_marking_window) {
+void G1Analytics::report_cost_per_byte_ms(double cost_per_byte_ms, bool in_marking_window) {
   if (in_marking_window) {
     _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
   } else {
@@ -195,51 +192,51 @@
   }
 }

-void G1Measurements::report_young_other_cost_per_region_ms(double other_cost_per_region_ms) {
+void G1Analytics::report_young_other_cost_per_region_ms(double other_cost_per_region_ms) {
   _young_other_cost_per_region_ms_seq->add(other_cost_per_region_ms);
 }

-void G1Measurements::report_non_young_other_cost_per_region_ms(double other_cost_per_region_ms) {
+void G1Analytics::report_non_young_other_cost_per_region_ms(double other_cost_per_region_ms) {
   _non_young_other_cost_per_region_ms_seq->add(other_cost_per_region_ms);
 }

-void G1Measurements::report_constant_other_time_ms(double constant_other_time_ms) {
+void G1Analytics::report_constant_other_time_ms(double constant_other_time_ms) {
   _constant_other_time_ms_seq->add(constant_other_time_ms);
 }

-void G1Measurements::report_pending_cards(double pending_cards) {
+void G1Analytics::report_pending_cards(double pending_cards) {
   _pending_cards_seq->add(pending_cards);
 }

-void G1Measurements::report_rs_lengths(double rs_lengths) {
+void G1Analytics::report_rs_lengths(double rs_lengths) {
   _rs_lengths_seq->add(rs_lengths);
 }

-size_t G1Measurements::predict_rs_length_diff() const {
+size_t G1Analytics::predict_rs_length_diff() const {
   return get_new_size_prediction(_rs_length_diff_seq);
 }

-double G1Measurements::predict_alloc_rate_ms() const {
+double G1Analytics::predict_alloc_rate_ms() const {
   return get_new_prediction(_alloc_rate_ms_seq);
 }

-double G1Measurements::predict_cost_per_card_ms() const {
+double G1Analytics::predict_cost_per_card_ms() const {
   return get_new_prediction(_cost_per_card_ms_seq);
 }

-double G1Measurements::predict_scan_hcc_ms() const {
+double G1Analytics::predict_scan_hcc_ms() const {
   return get_new_prediction(_cost_scan_hcc_seq);
 }

-double G1Measurements::predict_rs_update_time_ms(size_t pending_cards) const {
+double G1Analytics::predict_rs_update_time_ms(size_t pending_cards) const {
   return pending_cards * predict_cost_per_card_ms() + predict_scan_hcc_ms();
 }

-double G1Measurements::predict_young_cards_per_entry_ratio() const {
+double G1Analytics::predict_young_cards_per_entry_ratio() const {
   return get_new_prediction(_young_cards_per_entry_ratio_seq);
 }

-double G1Measurements::predict_mixed_cards_per_entry_ratio() const {
+double G1Analytics::predict_mixed_cards_per_entry_ratio() const {
   if (_mixed_cards_per_entry_ratio_seq->num() < 2) {
     return predict_young_cards_per_entry_ratio();
   } else {
@@ -247,7 +244,7 @@
   }
 }

-size_t G1Measurements::predict_card_num(size_t rs_length, bool gcs_are_young) const {
+size_t G1Analytics::predict_card_num(size_t rs_length, bool gcs_are_young) const {
   if (gcs_are_young) {
     return (size_t) (rs_length * predict_young_cards_per_entry_ratio());
   } else {
@@ -255,7 +252,7 @@
   }
 }

-double G1Measurements::predict_rs_scan_time_ms(size_t card_num, bool gcs_are_young) const {
+double G1Analytics::predict_rs_scan_time_ms(size_t card_num, bool gcs_are_young) const {
   if (gcs_are_young) {
     return card_num * get_new_prediction(_cost_per_entry_ms_seq);
   } else {
@@ -263,7 +260,7 @@
   }
 }

-double G1Measurements::predict_mixed_rs_scan_time_ms(size_t card_num) const {
+double G1Analytics::predict_mixed_rs_scan_time_ms(size_t card_num) const {
   if (_mixed_cost_per_entry_ms_seq->num() < 3) {
     return card_num * get_new_prediction(_cost_per_entry_ms_seq);
   } else {
@@ -271,7 +268,7 @@
   }
 }

-double G1Measurements::predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const {
+double G1Analytics::predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const {
   if (_cost_per_byte_ms_during_cm_seq->num() < 3) {
     return (1.1 * bytes_to_copy) * get_new_prediction(_cost_per_byte_ms_seq);
   } else {
@@ -279,7 +276,7 @@
   }
 }

-double G1Measurements::predict_object_copy_time_ms(size_t bytes_to_copy, bool during_concurrent_mark) const {
+double G1Analytics::predict_object_copy_time_ms(size_t bytes_to_copy, bool during_concurrent_mark) const {
   if (during_concurrent_mark) {
     return predict_object_copy_time_ms_during_cm(bytes_to_copy);
   } else {
@@ -287,46 +284,46 @@
   }
 }

-double G1Measurements::predict_constant_other_time_ms() const {
+double G1Analytics::predict_constant_other_time_ms() const {
   return get_new_prediction(_constant_other_time_ms_seq);
 }

-double G1Measurements::predict_young_other_time_ms(size_t young_num) const {
+double G1Analytics::predict_young_other_time_ms(size_t young_num) const {
   return young_num * get_new_prediction(_young_other_cost_per_region_ms_seq);
 }

-double G1Measurements::predict_non_young_other_time_ms(size_t non_young_num) const {
+double G1Analytics::predict_non_young_other_time_ms(size_t non_young_num) const {
   return non_young_num * get_new_prediction(_non_young_other_cost_per_region_ms_seq);
 }

-double G1Measurements::predict_remark_time_ms() const {
+double G1Analytics::predict_remark_time_ms() const {
   return get_new_prediction(_concurrent_mark_remark_times_ms);
 }

-double G1Measurements::predict_cleanup_time_ms() const {
+double G1Analytics::predict_cleanup_time_ms() const {
   return get_new_prediction(_concurrent_mark_cleanup_times_ms);
 }

-size_t G1Measurements::predict_rs_lengths() const {
+size_t G1Analytics::predict_rs_lengths() const {
   return get_new_size_prediction(_rs_lengths_seq);
 }

-size_t G1Measurements::predict_pending_cards() const {
+size_t G1Analytics::predict_pending_cards() const {
   return get_new_size_prediction(_pending_cards_seq);
 }

-void G1Measurements::update_recent_gc_times(double end_time_sec,
+double G1Analytics::last_known_gc_end_time_sec() const {
+  return _recent_prev_end_times_for_all_gcs_sec->oldest();
+}
+
+void G1Analytics::update_recent_gc_times(double end_time_sec,
                                          double pause_time_ms) {
-  double interval_ms =
-    (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
   _recent_gc_times_ms->add(pause_time_ms);
   _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
   _prev_collection_pause_end_ms = end_time_sec * 1000.0;
-
-  compute_pause_time_ratio(interval_ms, pause_time_ms);
 }

-void G1Measurements::report_concurrent_mark_cleanup_times_ms(double ms) {
+void G1Analytics::report_concurrent_mark_cleanup_times_ms(double ms) {
   _concurrent_mark_cleanup_times_ms->add(ms);
 }
diff --git a/src/share/vm/gc/g1/g1Measurements.hpp b/src/share/vm/gc/g1/g1Analytics.hpp
rename from src/share/vm/gc/g1/g1Measurements.hpp
rename to src/share/vm/gc/g1/g1Analytics.hpp
--- a/src/share/vm/gc/g1/g1Measurements.hpp
+++ b/src/share/vm/gc/g1/g1Analytics.hpp
@@ -31,10 +31,10 @@
 class TruncatedSeq;
 class G1Predictions;

-class G1Measurements: public CHeapObj<mtGC> {
+class G1Analytics: public CHeapObj<mtGC> {
   const static int TruncatedSeqLength = 10;
   const static int NumPrevPausesForHeuristics = 10;
-  G1Predictions* _predictor;
+  const G1Predictions* _predictor;

   // These exclude marking times.
   TruncatedSeq* _recent_gc_times_ms;
@@ -73,9 +73,8 @@
   double get_new_prediction(TruncatedSeq const* seq) const;
   size_t get_new_size_prediction(TruncatedSeq const* seq) const;

-  void compute_pause_time_ratio(double interval_ms, double pause_time_ms);
 public:
-  G1Measurements(G1Predictions* predictor);
+  G1Analytics(const G1Predictions* predictor);

   double prev_collection_pause_end_ms() const {
     return _prev_collection_pause_end_ms;
@@ -148,6 +147,9 @@

   // Add a new GC of the given duration and end time to the record.
   void update_recent_gc_times(double end_time_sec, double elapsed_ms);
+  void compute_pause_time_ratio(double interval_ms, double pause_time_ms);
+
+  double last_known_gc_end_time_sec() const;
 };

 #endif // SHARE_VM_GC_G1_G1MEASUREMENTS_HPP
diff --git a/src/share/vm/gc/g1/g1CollectorPolicy.cpp b/src/share/vm/gc/g1/g1CollectorPolicy.cpp
--- a/src/share/vm/gc/g1/g1CollectorPolicy.cpp
+++ b/src/share/vm/gc/g1/g1CollectorPolicy.cpp
@@ -25,13 +25,13 @@
 #include "precompiled.hpp"
 #include "gc/g1/concurrentG1Refine.hpp"
 #include "gc/g1/concurrentMarkThread.inline.hpp"
+#include "gc/g1/g1Analytics.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectionSet.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1ConcurrentMark.hpp"
 #include "gc/g1/g1IHOPControl.hpp"
 #include "gc/g1/g1GCPhaseTimes.hpp"
-#include "gc/g1/g1Measurements.hpp"
 #include "gc/g1/g1YoungGenSizer.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
@@ -44,7 +44,7 @@

 G1CollectorPolicy::G1CollectorPolicy() :
   _predictor(G1ConfidencePercent / 100.0),
-  _measurements(new G1Measurements(&_predictor)),
+  _analytics(new G1Analytics(&_predictor)),
   _pause_time_target_ms((double) MaxGCPauseMillis),
   _rs_lengths_prediction(0),
   _max_survivor_regions(0),
@@ -240,9 +240,9 @@
   double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
   size_t bytes_to_copy = (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
-  double copy_time_ms = _measurements->predict_object_copy_time_ms(bytes_to_copy,
+  double copy_time_ms = _analytics->predict_object_copy_time_ms(bytes_to_copy,
                                                                 collector_state()->during_concurrent_mark());
-  double young_other_time_ms = _measurements->predict_young_other_time_ms(young_length);
+  double young_other_time_ms = _analytics->predict_young_other_time_ms(young_length);
   double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
   if (pause_time_ms > target_pause_time_ms) {
     // end condition 2: prediction is over the target pause time
@@ -286,10 +286,10 @@
                                                      uint base_min_length) const {
   uint desired_min_length = 0;
   if (adaptive_young_list_length()) {
-    if (_measurements->num_alloc_rate_ms() > 3) {
+    if (_analytics->num_alloc_rate_ms() > 3) {
       double now_sec = os::elapsedTime();
       double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
-      double alloc_rate_ms = _measurements->predict_alloc_rate_ms();
+      double alloc_rate_ms = _analytics->predict_alloc_rate_ms();
       desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
     } else {
       // otherwise we don't have enough info to make the prediction
@@ -308,7 +308,7 @@
 }

 uint G1CollectorPolicy::update_young_list_max_and_target_length() {
-  return update_young_list_max_and_target_length(_measurements->predict_rs_lengths());
+  return update_young_list_max_and_target_length(_analytics->predict_rs_lengths());
 }

 uint G1CollectorPolicy::update_young_list_max_and_target_length(size_t rs_lengths) {
@@ -413,9 +413,9 @@
   double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
   double survivor_regions_evac_time = predict_survivor_regions_evac_time();

-  size_t pending_cards = _measurements->predict_pending_cards();
-  size_t adj_rs_lengths = rs_lengths + _measurements->predict_rs_length_diff();
-  size_t scanned_cards = _measurements->predict_card_num(adj_rs_lengths, /* gcs_are_young */ true);
+  size_t pending_cards = _analytics->predict_pending_cards();
+  size_t adj_rs_lengths = rs_lengths + _analytics->predict_rs_length_diff();
+  size_t scanned_cards = _analytics->predict_card_num(adj_rs_lengths, /* gcs_are_young */ true);

   double base_time_ms =
     predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
     survivor_regions_evac_time;
@@ -515,7 +515,7 @@
 }

 void G1CollectorPolicy::update_rs_lengths_prediction() {
-  update_rs_lengths_prediction(_measurements->predict_rs_lengths());
+  update_rs_lengths_prediction(_analytics->predict_rs_lengths());
 }

 void G1CollectorPolicy::update_rs_lengths_prediction(size_t prediction) {
@@ -583,7 +583,7 @@
   double full_gc_time_sec = end_sec - _full_collection_start_sec;
   double full_gc_time_ms = full_gc_time_sec * 1000.0;

-  _measurements->update_recent_gc_times(end_sec, full_gc_time_ms);
+  _analytics->update_recent_gc_times(end_sec, full_gc_time_ms);

   collector_state()->set_full_collection(false);

@@ -651,8 +651,8 @@
 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
   double end_time_sec = os::elapsedTime();
   double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
-  _measurements->report_concurrent_mark_remark_times_ms(elapsed_time_ms);
-  _measurements->append_prev_collection_pause_end_ms(elapsed_time_ms);
+  _analytics->report_concurrent_mark_remark_times_ms(elapsed_time_ms);
+  _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);

   record_pause(Remark, _mark_remark_start_sec, end_time_sec);
 }

@@ -751,7 +751,7 @@
     maybe_start_marking();
   }

-  double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _measurements->prev_collection_pause_end_ms());
+  double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _analytics->prev_collection_pause_end_ms());
   if (app_time_ms < MIN_TIMER_GRANULARITY) {
     // This usually happens due to the timer not having the required
     // granularity. Some Linuxes are the usual culprits.
@@ -770,9 +770,12 @@
     // place we can safely ignore them here.
     uint regions_allocated = _collection_set->eden_region_length();
     double alloc_rate_ms = (double) regions_allocated / app_time_ms;
-    _measurements->report_alloc_rate_ms(alloc_rate_ms);
+    _analytics->report_alloc_rate_ms(alloc_rate_ms);

-    _measurements->update_recent_gc_times(end_time_sec, pause_time_ms);
+    double interval_ms =
+      (end_time_sec - _analytics->last_known_gc_end_time_sec()) * 1000.0;
+    _analytics->update_recent_gc_times(end_time_sec, pause_time_ms);
+    _analytics->compute_pause_time_ratio(interval_ms, pause_time_ms);
   }

   bool new_in_marking_window = collector_state()->in_marking_window();
@@ -818,20 +821,20 @@
   double cost_per_card_ms = 0.0;
   if (_pending_cards > 0) {
     cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms) / (double) _pending_cards;
-    _measurements->report_cost_per_card_ms(cost_per_card_ms);
+    _analytics->report_cost_per_card_ms(cost_per_card_ms);
   }
-  _measurements->report_cost_scan_hcc(scan_hcc_time_ms);
+  _analytics->report_cost_scan_hcc(scan_hcc_time_ms);

   double cost_per_entry_ms = 0.0;
   if (cards_scanned > 10) {
     cost_per_entry_ms = average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned;
-    _measurements->report_cost_per_entry_ms(cost_per_entry_ms, collector_state()->last_gc_was_young());
+    _analytics->report_cost_per_entry_ms(cost_per_entry_ms, collector_state()->last_gc_was_young());
   }

   if (_max_rs_lengths > 0) {
     double cards_per_entry_ratio =
       (double) cards_scanned / (double) _max_rs_lengths;
-    _measurements->report_cards_per_entry_ratio(cards_per_entry_ratio, collector_state()->last_gc_was_young());
+    _analytics->report_cards_per_entry_ratio(cards_per_entry_ratio, collector_state()->last_gc_was_young());
   }

   // This is defensive. For a while _max_rs_lengths could get
@@ -852,7 +855,7 @@
   if (_max_rs_lengths > recorded_rs_lengths) {
     rs_length_diff = _max_rs_lengths - recorded_rs_lengths;
   }
-  _measurements->report_rs_length_diff((double) rs_length_diff);
+  _analytics->report_rs_length_diff((double) rs_length_diff);

   size_t freed_bytes = heap_used_bytes_before_gc - cur_used_bytes;
   size_t copied_bytes = _collection_set->bytes_used_before() - freed_bytes;
@@ -860,23 +863,23 @@

   if (copied_bytes > 0) {
     cost_per_byte_ms = average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
-    _measurements->report_cost_per_byte_ms(cost_per_byte_ms, collector_state()->in_marking_window());
+    _analytics->report_cost_per_byte_ms(cost_per_byte_ms, collector_state()->in_marking_window());
   }

   if (_collection_set->young_region_length() > 0) {
-    _measurements->report_young_other_cost_per_region_ms(young_other_time_ms() /
+    _analytics->report_young_other_cost_per_region_ms(young_other_time_ms() /
                                                       _collection_set->young_region_length());
   }

   if (_collection_set->old_region_length() > 0) {
-    _measurements->report_non_young_other_cost_per_region_ms(non_young_other_time_ms() /
+    _analytics->report_non_young_other_cost_per_region_ms(non_young_other_time_ms() /
                                                           _collection_set->old_region_length());
   }

-  _measurements->report_constant_other_time_ms(constant_other_time_ms(pause_time_ms));
+  _analytics->report_constant_other_time_ms(constant_other_time_ms(pause_time_ms));

-  _measurements->report_pending_cards((double) _pending_cards);
-  _measurements->report_rs_lengths((double) _max_rs_lengths);
+  _analytics->report_pending_cards((double) _pending_cards);
+  _analytics->report_rs_lengths((double) _max_rs_lengths);
 }

 collector_state()->set_in_marking_window(new_in_marking_window);
@@ -1034,14 +1037,14 @@

 double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
                                                        size_t scanned_cards) const {
   return
-    _measurements->predict_rs_update_time_ms(pending_cards) +
-    _measurements->predict_rs_scan_time_ms(scanned_cards, collector_state()->gcs_are_young()) +
-    _measurements->predict_constant_other_time_ms();
+    _analytics->predict_rs_update_time_ms(pending_cards) +
+    _analytics->predict_rs_scan_time_ms(scanned_cards, collector_state()->gcs_are_young()) +
+    _analytics->predict_constant_other_time_ms();
 }

 double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) const {
-  size_t rs_length = _measurements->predict_rs_lengths() + _measurements->predict_rs_length_diff();
-  size_t card_num = _measurements->predict_card_num(rs_length, collector_state()->gcs_are_young());
+  size_t rs_length = _analytics->predict_rs_lengths() + _analytics->predict_rs_length_diff();
+  size_t card_num = _analytics->predict_card_num(rs_length, collector_state()->gcs_are_young());
   return predict_base_elapsed_time_ms(pending_cards, card_num);
 }

@@ -1063,19 +1066,19 @@
   size_t rs_length = hr->rem_set()->occupied();
   // Predicting the number of cards is based on which type of GC
   // we're predicting for.
-  size_t card_num = _measurements->predict_card_num(rs_length, for_young_gc);
+  size_t card_num = _analytics->predict_card_num(rs_length, for_young_gc);
   size_t bytes_to_copy = predict_bytes_to_copy(hr);

   double region_elapsed_time_ms =
-    _measurements->predict_rs_scan_time_ms(card_num, collector_state()->gcs_are_young()) +
-    _measurements->predict_object_copy_time_ms(bytes_to_copy ,collector_state()->during_concurrent_mark());
+    _analytics->predict_rs_scan_time_ms(card_num, collector_state()->gcs_are_young()) +
+    _analytics->predict_object_copy_time_ms(bytes_to_copy ,collector_state()->during_concurrent_mark());

   // The prediction of the "other" time for this region is based
   // upon the region type and NOT the GC type.
   if (hr->is_young()) {
-    region_elapsed_time_ms += _measurements->predict_young_other_time_ms(1);
+    region_elapsed_time_ms += _analytics->predict_young_other_time_ms(1);
   } else {
-    region_elapsed_time_ms += _measurements->predict_non_young_other_time_ms(1);
+    region_elapsed_time_ms += _analytics->predict_non_young_other_time_ms(1);
   }
   return region_elapsed_time_ms;
 }

@@ -1087,8 +1090,8 @@
 }

 size_t G1CollectorPolicy::expansion_amount() {
-  double recent_gc_overhead = _measurements->recent_avg_pause_time_ratio() * 100.0;
-  double last_gc_overhead = _measurements->last_pause_time_ratio() * 100.0;
+  double recent_gc_overhead = _analytics->recent_avg_pause_time_ratio() * 100.0;
+  double last_gc_overhead = _analytics->last_pause_time_ratio() * 100.0;
   double threshold = _gc_overhead_perc;
   size_t expand_bytes = 0;

@@ -1373,8 +1376,8 @@

   double end_sec = os::elapsedTime();
   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
-  _measurements->report_concurrent_mark_cleanup_times_ms(elapsed_time_ms);
-  _measurements->append_prev_collection_pause_end_ms(elapsed_time_ms);
+  _analytics->report_concurrent_mark_cleanup_times_ms(elapsed_time_ms);
+  _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);

   record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
 }

diff --git a/src/share/vm/gc/g1/g1CollectorPolicy.hpp b/src/share/vm/gc/g1/g1CollectorPolicy.hpp
--- a/src/share/vm/gc/g1/g1CollectorPolicy.hpp
+++ b/src/share/vm/gc/g1/g1CollectorPolicy.hpp
@@ -43,7 +43,7 @@
 class G1CollectionSet;
 class CollectionSetChooser;
 class G1IHOPControl;
-class G1Measurements;
+class G1Analytics;
 class G1YoungGenSizer;

 class G1CollectorPolicy: public CollectorPolicy {
@@ -58,7 +58,7 @@
   void report_ihop_statistics();

   G1Predictions _predictor;
-  G1Measurements* _measurements;
+  G1Analytics* _analytics;

   G1MMUTracker* _mmu_tracker;

@@ -121,7 +121,7 @@
   G1InitialMarkToMixedTimeTracker _initial_mark_to_mixed;
 public:
   const G1Predictions& predictor() const { return _predictor; }
-  const G1Measurements* measurements() { return const_cast<const G1Measurements*>(_measurements); }
+  const G1Analytics* analytics() const { return const_cast<const G1Analytics*>(_analytics); }

   // Add the given number of bytes to the total number of allocated bytes in the old gen.
   void add_bytes_allocated_in_old_since_last_gc(size_t bytes) { _bytes_allocated_in_old_since_last_gc += bytes; }
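
Besides the mechanical rename, the second patch changes one calling convention: update_recent_gc_times() no longer derives interval_ms and computes the pause-time ratio itself. Instead the caller in record_collection_pause_end() reads last_known_gc_end_time_sec() before recording the pause, then invokes compute_pause_time_ratio() explicitly with the interval it computed. The following is a standalone sketch of that call order, not HotSpot code: AnalyticsLite is an illustrative stand-in, and the clamping the real compute_pause_time_ratio() applies when the ratio falls outside [0, 1] is omitted.

// Standalone sketch (not HotSpot code) of the call-order change in the
// rename patch: the caller reads the oldest retained GC end time, then
// reports the pause, then triggers the ratio computation explicitly.
#include <cstdio>
#include <deque>
#include <numeric>

class AnalyticsLite {
  std::deque<double> _recent_gc_times_ms;   // last N pause durations
  std::deque<double> _recent_end_times_sec; // last N pause end times
  double _recent_avg_pause_time_ratio = 0.0;
  static constexpr size_t N = 10;           // cf. NumPrevPausesForHeuristics
  static void push(std::deque<double>& q, double v) {
    if (q.size() == N) q.pop_front();       // cf. TruncatedSeq's sliding window
    q.push_back(v);
  }
public:
  // cf. _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime()) at init.
  explicit AnalyticsLite(double start_sec) { _recent_end_times_sec.push_back(start_sec); }
  double last_known_gc_end_time_sec() const { return _recent_end_times_sec.front(); }
  void update_recent_gc_times(double end_time_sec, double pause_time_ms) {
    push(_recent_gc_times_ms, pause_time_ms);
    push(_recent_end_times_sec, end_time_sec);
  }
  void compute_pause_time_ratio(double interval_ms, double /*pause_time_ms*/) {
    // GC time over wall-clock time for the retained window; the real code
    // also clamps the result and tracks a last-pause ratio.
    double sum = std::accumulate(_recent_gc_times_ms.begin(),
                                 _recent_gc_times_ms.end(), 0.0);
    _recent_avg_pause_time_ratio = sum / interval_ms;
  }
  double recent_avg_pause_time_ratio() const { return _recent_avg_pause_time_ratio; }
};

int main() {
  AnalyticsLite a(0.0);
  // Caller-side sequence after the patch (cf. record_collection_pause_end):
  double end_time_sec = 1.0, pause_time_ms = 25.0;
  double interval_ms = (end_time_sec - a.last_known_gc_end_time_sec()) * 1000.0;
  a.update_recent_gc_times(end_time_sec, pause_time_ms);
  a.compute_pause_time_ratio(interval_ms, pause_time_ms);
  std::printf("recent avg pause ratio: %.3f\n", a.recent_avg_pause_time_ratio());
}

The ordering matters: the oldest retained end time must be read before the new pause is added, since adding first can evict the oldest entry once the window is full, shrinking the measured interval and overstating the recent GC overhead that expansion_amount() acts on.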