// Default cost per copied byte (ms/byte). All of these tables are indexed by
// MIN2(ParallelGCThreads - 1, 7) (see the constructor) and seed the predictor
// sequences before any real GC samples have been collected.
static double cost_per_byte_ms_defaults[] = {
  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};

// these should be pretty consistent
// Default fixed per-pause overhead (ms), essentially thread-count independent.
static double constant_other_time_ms_defaults[] = {
  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};


// Default per-region "other" cost (ms) for young regions.
static double young_other_cost_per_region_ms_defaults[] = {
  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};

// Default per-region "other" cost (ms) for non-young (old) regions.
static double non_young_other_cost_per_region_ms_defaults[] = {
  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};
81
82 G1CollectorPolicy::G1CollectorPolicy() :
83 _predictor(G1ConfidencePercent / 100.0),
84 _parallel_gc_threads(ParallelGCThreads),
85
86 _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
87 _stop_world_start(0.0),
88
89 _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
90 _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
91
92 _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
93 _prev_collection_pause_end_ms(0.0),
94 _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
95 _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
96 _cost_scan_hcc_seq(new TruncatedSeq(TruncatedSeqLength)),
97 _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
98 _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
99 _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
100 _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
101 _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
102 _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
103 _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
104 _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
105 _non_young_other_cost_per_region_ms_seq(
106 new TruncatedSeq(TruncatedSeqLength)),
107
155 new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary);
156
157 // Set up the region size and associated fields. Given that the
158 // policy is created before the heap, we have to set this up here,
159 // so it's done as soon as possible.
160
161 // It would have been natural to pass initial_heap_byte_size() and
162 // max_heap_byte_size() to setup_heap_region_size() but those have
163 // not been set up at this point since they should be aligned with
164 // the region size. So, there is a circular dependency here. We base
165 // the region size on the heap size, but the heap size should be
166 // aligned with the region size. To get around this we use the
167 // unaligned values for the heap.
168 HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
169 HeapRegionRemSet::setup_remset_size();
170
171 _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
172 _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
173 clear_ratio_check_data();
174
175 _phase_times = new G1GCPhaseTimes(_parallel_gc_threads);
176
177 int index = MIN2(_parallel_gc_threads - 1, 7);
178
179 _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
180 _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
181 _cost_scan_hcc_seq->add(0.0);
182 _young_cards_per_entry_ratio_seq->add(
183 young_cards_per_entry_ratio_defaults[index]);
184 _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
185 _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
186 _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
187 _young_other_cost_per_region_ms_seq->add(
188 young_other_cost_per_region_ms_defaults[index]);
189 _non_young_other_cost_per_region_ms_seq->add(
190 non_young_other_cost_per_region_ms_defaults[index]);
191
192 // Below, we might need to calculate the pause time target based on
193 // the pause interval. When we do so we are going to give G1 maximum
194 // flexibility and allow it to do pauses when it needs to. So, we'll
195 // arrange that the pause interval to be pause time target + 1 to
196 // ensure that a) the pause time target is maximized with respect to
197 // the pause interval and b) we maintain the invariant that pause
794 }
795 }
796
797 return ret;
798 }
799 #endif // PRODUCT
800
801 void G1CollectorPolicy::record_full_collection_start() {
802 _full_collection_start_sec = os::elapsedTime();
803 // Release the future to-space so that it is available for compaction into.
804 collector_state()->set_full_collection(true);
805 }
806
// Record the end of a Full GC: fold its duration into the pause-time
// bookkeeping and reset the young/mixed heuristics so the policy restarts
// from a clean, young-only state.
void G1CollectorPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _full_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  _trace_old_gen_time_data.record_full_collection(full_gc_time_ms);

  update_recent_gc_times(end_sec, full_gc_time_ms);

  collector_state()->set_full_collection(false);

  // "Nuke" the heuristics that control the young/mixed GC
  // transitions and make sure we start with young GCs after the Full GC.
  collector_state()->set_gcs_are_young(true);
  collector_state()->set_last_young_gc(false);
  collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
  collector_state()->set_during_initial_mark_pause(false);
  collector_state()->set_in_marking_window(false);
  collector_state()->set_in_marking_window_im(false);

  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  // No survivors remain after a Full GC.
  record_survivor_regions(0, NULL, NULL);

  _free_regions_at_end_of_collection = _g1->num_free_regions();
  // Reset survivors SurvRateGroup.
  _survivor_surv_rate_group->reset();
  update_young_list_max_and_target_length();
  update_rs_lengths_prediction();
  cset_chooser()->clear();

  // Old-gen allocation tracking restarts from zero after the compaction.
  _bytes_allocated_in_old_since_last_gc = 0;

  record_pause(FullGC, _full_collection_start_sec, end_sec);
}
845
846 void G1CollectorPolicy::record_stop_world_start() {
847 _stop_world_start = os::elapsedTime();
848 }
849
// Prepare policy state at the start of an evacuation pause (young/mixed GC).
void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start. so, no point is calculating this
  // every time we calculate / recalculate the target young length.
  update_survivors_policy();

  assert(_g1->used() == _g1->recalculate_used(),
         "sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT,
         _g1->used(), _g1->recalculate_used());

  // Time from the stop-the-world request (_stop_world_start) to the actual
  // pause start; fed to the young-gen tracing, then the marker is cleared.
  double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
  _trace_young_gen_time_data.record_start_collection(s_w_t_ms);
  _stop_world_start = 0.0;

  phase_times()->record_cur_collection_start_sec(start_time_sec);
  _pending_cards = _g1->pending_card_num();

  // Reset per-pause accounting.
  _collection_set_bytes_used_before = 0;
  _bytes_copied_during_gc = 0;

  collector_state()->set_last_gc_was_young(false);

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert( verify_young_ages(), "region age verification" );
}
878
879 void G1CollectorPolicy::record_concurrent_mark_init_end(double
880 mark_init_elapsed_time_ms) {
881 collector_state()->set_during_marking(true);
882 assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
883 collector_state()->set_during_initial_mark_pause(false);
895 _prev_collection_pause_end_ms += elapsed_time_ms;
896
897 record_pause(Remark, _mark_remark_start_sec, end_time_sec);
898 }
899
900 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
901 _mark_cleanup_start_sec = os::elapsedTime();
902 }
903
904 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
905 bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc",
906 "skip last young-only gc");
907 collector_state()->set_last_young_gc(should_continue_with_reclaim);
908 // We skip the marking phase.
909 if (!should_continue_with_reclaim) {
910 abort_time_to_mixed_tracking();
911 }
912 collector_state()->set_in_marking_window(false);
913 }
914
915 void G1CollectorPolicy::record_concurrent_pause() {
916 if (_stop_world_start > 0.0) {
917 double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
918 _trace_young_gen_time_data.record_yield_time(yield_ms);
919 }
920 }
921
922 double G1CollectorPolicy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
923 return phase_times()->average_time_ms(phase);
924 }
925
926 double G1CollectorPolicy::young_other_time_ms() const {
927 return phase_times()->young_cset_choice_time_ms() +
928 phase_times()->young_free_cset_time_ms();
929 }
930
931 double G1CollectorPolicy::non_young_other_time_ms() const {
932 return phase_times()->non_young_cset_choice_time_ms() +
933 phase_times()->non_young_free_cset_time_ms();
934
935 }
936
937 double G1CollectorPolicy::other_time_ms(double pause_time_ms) const {
938 return pause_time_ms -
939 average_time_ms(G1GCPhaseTimes::UpdateRS) -
940 average_time_ms(G1GCPhaseTimes::ScanRS) -
941 average_time_ms(G1GCPhaseTimes::ObjCopy) -
986 NOT_PRODUCT(_short_lived_surv_rate_group->print());
987
988 record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);
989
990 last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
991 if (last_pause_included_initial_mark) {
992 record_concurrent_mark_init_end(0.0);
993 } else {
994 maybe_start_marking();
995 }
996
997 double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
998 if (app_time_ms < MIN_TIMER_GRANULARITY) {
999 // This usually happens due to the timer not having the required
1000 // granularity. Some Linuxes are the usual culprits.
1001 // We'll just set it to something (arbitrarily) small.
1002 app_time_ms = 1.0;
1003 }
1004
1005 if (update_stats) {
1006 _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
1007 // We maintain the invariant that all objects allocated by mutator
1008 // threads will be allocated out of eden regions. So, we can use
1009 // the eden region number allocated since the previous GC to
1010 // calculate the application's allocate rate. The only exception
1011 // to that is humongous objects that are allocated separately. But
1012 // given that humongous object allocations do not really affect
1013 // either the pause's duration nor when the next pause will take
1014 // place we can safely ignore them here.
1015 uint regions_allocated = eden_cset_region_length();
1016 double alloc_rate_ms = (double) regions_allocated / app_time_ms;
1017 _alloc_rate_ms_seq->add(alloc_rate_ms);
1018
1019 double interval_ms =
1020 (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
1021 update_recent_gc_times(end_time_sec, pause_time_ms);
1022 _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
1023 if (recent_avg_pause_time_ratio() < 0.0 ||
1024 (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
1025 // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
1026 // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
1576 expand_bytes = MAX2(expand_bytes, min_expand_bytes);
1577 expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
1578
1579 clear_ratio_check_data();
1580 } else {
1581 // An expansion was not triggered. If we've started counting, increment
1582 // the number of checks we've made in the current window. If we've
1583 // reached the end of the window without resizing, clear the counters to
1584 // start again the next time we see a ratio above the threshold.
1585 if (_ratio_over_threshold_count > 0) {
1586 _pauses_since_start++;
1587 if (_pauses_since_start > NumPrevPausesForHeuristics) {
1588 clear_ratio_check_data();
1589 }
1590 }
1591 }
1592
1593 return expand_bytes;
1594 }
1595
// Dump the accumulated young-gen and old-gen timing summaries
// (each is a no-op unless its tracing flag is enabled).
void G1CollectorPolicy::print_tracing_info() const {
  _trace_young_gen_time_data.print();
  _trace_old_gen_time_data.print();
}
1600
// Print the survivor-rate summary for the short-lived group.
// Debug-only diagnostics; compiled out in PRODUCT builds.
void G1CollectorPolicy::print_yg_surv_rate_info() const {
#ifndef PRODUCT
  _short_lived_surv_rate_group->print_surv_rate_summary();
  // add this call for any other surv rate groups
#endif // PRODUCT
}
1607
1608 bool G1CollectorPolicy::is_young_list_full() const {
1609 uint young_list_length = _g1->young_list()->length();
1610 uint young_list_target_length = _young_list_target_length;
1611 return young_list_length >= young_list_target_length;
1612 }
1613
1614 bool G1CollectorPolicy::can_expand_young_list() const {
1615 uint young_list_length = _g1->young_list()->length();
1616 uint young_list_max_length = _young_list_max_length;
1617 return young_list_length < young_list_max_length;
1618 }
1619
1620 void G1CollectorPolicy::update_max_gc_locker_expansion() {
2108
2109
2110 double G1CollectorPolicy::finalize_young_cset_part(double target_pause_time_ms) {
2111 double young_start_time_sec = os::elapsedTime();
2112
2113 YoungList* young_list = _g1->young_list();
2114 finalize_incremental_cset_building();
2115
2116 guarantee(target_pause_time_ms > 0.0,
2117 "target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms);
2118 guarantee(_collection_set == NULL, "Precondition");
2119
2120 double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
2121 double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);
2122
2123 log_trace(gc, ergo, cset)("Start choosing CSet. pending cards: " SIZE_FORMAT " predicted base time: %1.2fms remaining time: %1.2fms target pause time: %1.2fms",
2124 _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
2125
2126 collector_state()->set_last_gc_was_young(collector_state()->gcs_are_young());
2127
2128 if (collector_state()->last_gc_was_young()) {
2129 _trace_young_gen_time_data.increment_young_collection_count();
2130 } else {
2131 _trace_young_gen_time_data.increment_mixed_collection_count();
2132 }
2133
2134 // The young list is laid with the survivor regions from the previous
2135 // pause are appended to the RHS of the young list, i.e.
2136 // [Newly Young Regions ++ Survivors from last pause].
2137
2138 uint survivor_region_length = young_list->survivor_length();
2139 uint eden_region_length = young_list->eden_length();
2140 init_cset_region_lengths(eden_region_length, survivor_region_length);
2141
2142 HeapRegion* hr = young_list->first_survivor_region();
2143 while (hr != NULL) {
2144 assert(hr->is_survivor(), "badly formed young list");
2145 // There is a convention that all the young regions in the CSet
2146 // are tagged as "eden", so we do this for the survivors here. We
2147 // use the special set_eden_pre_gc() as it doesn't check that the
2148 // region is free (which is not the case here).
2149 hr->set_eden_pre_gc();
2150 hr = hr->get_next_young_region();
2151 }
2152
2153 // Clear the fields that point to the survivor list - they are all young now.
2252
2253 if (expensive_region_num > 0) {
2254 // We print the information once here at the end, predicated on
2255 // whether we added any apparently expensive regions or not, to
2256 // avoid generating output per region.
2257 log_debug(gc, ergo, cset)("Added expensive regions to CSet (old CSet region num not reached min)."
2258 "old: %u regions, expensive: %u regions, min: %u regions, remaining time: %1.2fms",
2259 old_cset_region_length(), expensive_region_num, min_old_cset_length, time_remaining_ms);
2260 }
2261
2262 cset_chooser()->verify();
2263 }
2264
2265 stop_incremental_cset_building();
2266
2267 log_debug(gc, ergo, cset)("Finish choosing CSet. old: %u regions, predicted old region time: %1.2fms, time remaining: %1.2f",
2268 old_cset_region_length(), predicted_old_time_ms, time_remaining_ms);
2269
2270 double non_young_end_time_sec = os::elapsedTime();
2271 phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
2272 }
2273
2274 void TraceYoungGenTimeData::record_start_collection(double time_to_stop_the_world_ms) {
2275 if(TraceYoungGenTime) {
2276 _all_stop_world_times_ms.add(time_to_stop_the_world_ms);
2277 }
2278 }
2279
2280 void TraceYoungGenTimeData::record_yield_time(double yield_time_ms) {
2281 if(TraceYoungGenTime) {
2282 _all_yield_times_ms.add(yield_time_ms);
2283 }
2284 }
2285
// Fold the just-finished pause into the young-gen tracing sequences.
// No-op unless TraceYoungGenTime is enabled.
void TraceYoungGenTimeData::record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times) {
  if(TraceYoungGenTime) {
    _total.add(pause_time_ms);
    // "Other" is the part of the pause the phase timers did not account for.
    _other.add(pause_time_ms - phase_times->accounted_time_ms());
    _root_region_scan_wait.add(phase_times->root_region_scan_wait_time_ms());
    _parallel.add(phase_times->cur_collection_par_time_ms());
    _ext_root_scan.add(phase_times->average_time_ms(G1GCPhaseTimes::ExtRootScan));
    _satb_filtering.add(phase_times->average_time_ms(G1GCPhaseTimes::SATBFiltering));
    _update_rs.add(phase_times->average_time_ms(G1GCPhaseTimes::UpdateRS));
    _scan_rs.add(phase_times->average_time_ms(G1GCPhaseTimes::ScanRS));
    _obj_copy.add(phase_times->average_time_ms(G1GCPhaseTimes::ObjCopy));
    _termination.add(phase_times->average_time_ms(G1GCPhaseTimes::Termination));

    // Sum of the individually tracked parallel phases; whatever remains of
    // the total parallel time is attributed to "parallel other" below.
    double parallel_known_time = phase_times->average_time_ms(G1GCPhaseTimes::ExtRootScan) +
      phase_times->average_time_ms(G1GCPhaseTimes::SATBFiltering) +
      phase_times->average_time_ms(G1GCPhaseTimes::UpdateRS) +
      phase_times->average_time_ms(G1GCPhaseTimes::ScanRS) +
      phase_times->average_time_ms(G1GCPhaseTimes::ObjCopy) +
      phase_times->average_time_ms(G1GCPhaseTimes::Termination);

    double parallel_other_time = phase_times->cur_collection_par_time_ms() - parallel_known_time;
    _parallel_other.add(parallel_other_time);
    _clear_ct.add(phase_times->cur_clear_ct_time_ms());
  }
}
2311
2312 void TraceYoungGenTimeData::increment_young_collection_count() {
2313 if(TraceYoungGenTime) {
2314 ++_young_pause_num;
2315 }
2316 }
2317
2318 void TraceYoungGenTimeData::increment_mixed_collection_count() {
2319 if(TraceYoungGenTime) {
2320 ++_mixed_pause_num;
2321 }
2322 }
2323
2324 void TraceYoungGenTimeData::print_summary(const char* str,
2325 const NumberSeq* seq) const {
2326 double sum = seq->sum();
2327 tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)",
2328 str, sum / 1000.0, seq->avg());
2329 }
2330
2331 void TraceYoungGenTimeData::print_summary_sd(const char* str,
2332 const NumberSeq* seq) const {
2333 print_summary(str, seq);
2334 tty->print_cr("%45s = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
2335 "(num", seq->num(), seq->sd(), seq->maximum());
2336 }
2337
// Print the full young-gen timing report: overall pause totals, a per-phase
// breakdown of evacuation pauses, and miscellaneous stop-world/yield stats.
// No-op unless TraceYoungGenTime is enabled.
void TraceYoungGenTimeData::print() const {
  if (!TraceYoungGenTime) {
    return;
  }

  tty->print_cr("ALL PAUSES");
  print_summary_sd("   Total", &_total);
  tty->cr();
  tty->cr();
  tty->print_cr("   Young GC Pauses: %8d", _young_pause_num);
  tty->print_cr("   Mixed GC Pauses: %8d", _mixed_pause_num);
  tty->cr();

  tty->print_cr("EVACUATION PAUSES");

  // Only print the per-phase breakdown if at least one pause happened.
  if (_young_pause_num == 0 && _mixed_pause_num == 0) {
    tty->print_cr("none");
  } else {
    print_summary_sd("   Evacuation Pauses", &_total);
    print_summary("      Root Region Scan Wait", &_root_region_scan_wait);
    print_summary("      Parallel Time", &_parallel);
    print_summary("         Ext Root Scanning", &_ext_root_scan);
    print_summary("         SATB Filtering", &_satb_filtering);
    print_summary("         Update RS", &_update_rs);
    print_summary("         Scan RS", &_scan_rs);
    print_summary("         Object Copy", &_obj_copy);
    print_summary("         Termination", &_termination);
    print_summary("         Parallel Other", &_parallel_other);
    print_summary("      Clear CT", &_clear_ct);
    print_summary("      Other", &_other);
  }
  tty->cr();

  tty->print_cr("MISC");
  print_summary_sd("   Stop World", &_all_stop_world_times_ms);
  print_summary_sd("   Yields", &_all_yield_times_ms);
}
2375
2376 void TraceOldGenTimeData::record_full_collection(double full_gc_time_ms) {
2377 if (TraceOldGenTime) {
2378 _all_full_gc_times.add(full_gc_time_ms);
2379 }
2380 }
2381
// Print the Full GC summary (count, total, average, std dev, max).
// No-op unless TraceOldGenTime is enabled or when no Full GC has occurred.
void TraceOldGenTimeData::print() const {
  if (!TraceOldGenTime) {
    return;
  }

  if (_all_full_gc_times.num() > 0) {
    tty->print("\n%4d full_gcs: total time = %8.2f s",
               _all_full_gc_times.num(),
               _all_full_gc_times.sum() / 1000.0);
    tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times.avg());
    tty->print_cr("                     [std. dev = %8.2f ms, max = %8.2f ms]",
                  _all_full_gc_times.sd(),
                  _all_full_gc_times.maximum());
  }
}
|
// Default cost per copied byte (ms/byte). All of these tables are indexed by
// MIN2(ParallelGCThreads - 1, 7u) (see the constructor) and seed the
// predictor sequences before any real GC samples have been collected.
static double cost_per_byte_ms_defaults[] = {
  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};

// these should be pretty consistent
// Default fixed per-pause overhead (ms), essentially thread-count independent.
static double constant_other_time_ms_defaults[] = {
  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};


// Default per-region "other" cost (ms) for young regions.
static double young_other_cost_per_region_ms_defaults[] = {
  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};

// Default per-region "other" cost (ms) for non-young (old) regions.
static double non_young_other_cost_per_region_ms_defaults[] = {
  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};
81
82 G1CollectorPolicy::G1CollectorPolicy() :
83 _predictor(G1ConfidencePercent / 100.0),
84
85 _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
86
87 _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
88 _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
89
90 _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
91 _prev_collection_pause_end_ms(0.0),
92 _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
93 _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
94 _cost_scan_hcc_seq(new TruncatedSeq(TruncatedSeqLength)),
95 _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
96 _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
97 _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
98 _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
99 _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
100 _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
101 _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
102 _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
103 _non_young_other_cost_per_region_ms_seq(
104 new TruncatedSeq(TruncatedSeqLength)),
105
153 new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary);
154
155 // Set up the region size and associated fields. Given that the
156 // policy is created before the heap, we have to set this up here,
157 // so it's done as soon as possible.
158
159 // It would have been natural to pass initial_heap_byte_size() and
160 // max_heap_byte_size() to setup_heap_region_size() but those have
161 // not been set up at this point since they should be aligned with
162 // the region size. So, there is a circular dependency here. We base
163 // the region size on the heap size, but the heap size should be
164 // aligned with the region size. To get around this we use the
165 // unaligned values for the heap.
166 HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
167 HeapRegionRemSet::setup_remset_size();
168
169 _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
170 _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
171 clear_ratio_check_data();
172
173 _phase_times = new G1GCPhaseTimes(ParallelGCThreads);
174
175 int index = MIN2(ParallelGCThreads - 1, 7u);
176
177 _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
178 _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
179 _cost_scan_hcc_seq->add(0.0);
180 _young_cards_per_entry_ratio_seq->add(
181 young_cards_per_entry_ratio_defaults[index]);
182 _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
183 _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
184 _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
185 _young_other_cost_per_region_ms_seq->add(
186 young_other_cost_per_region_ms_defaults[index]);
187 _non_young_other_cost_per_region_ms_seq->add(
188 non_young_other_cost_per_region_ms_defaults[index]);
189
190 // Below, we might need to calculate the pause time target based on
191 // the pause interval. When we do so we are going to give G1 maximum
192 // flexibility and allow it to do pauses when it needs to. So, we'll
193 // arrange that the pause interval to be pause time target + 1 to
194 // ensure that a) the pause time target is maximized with respect to
195 // the pause interval and b) we maintain the invariant that pause
792 }
793 }
794
795 return ret;
796 }
797 #endif // PRODUCT
798
799 void G1CollectorPolicy::record_full_collection_start() {
800 _full_collection_start_sec = os::elapsedTime();
801 // Release the future to-space so that it is available for compaction into.
802 collector_state()->set_full_collection(true);
803 }
804
// Record the end of a Full GC: fold its duration into the pause-time
// bookkeeping and reset the young/mixed heuristics so the policy restarts
// from a clean, young-only state.
void G1CollectorPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _full_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  update_recent_gc_times(end_sec, full_gc_time_ms);

  collector_state()->set_full_collection(false);

  // "Nuke" the heuristics that control the young/mixed GC
  // transitions and make sure we start with young GCs after the Full GC.
  collector_state()->set_gcs_are_young(true);
  collector_state()->set_last_young_gc(false);
  collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
  collector_state()->set_during_initial_mark_pause(false);
  collector_state()->set_in_marking_window(false);
  collector_state()->set_in_marking_window_im(false);

  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  // No survivors remain after a Full GC.
  record_survivor_regions(0, NULL, NULL);

  _free_regions_at_end_of_collection = _g1->num_free_regions();
  // Reset survivors SurvRateGroup.
  _survivor_surv_rate_group->reset();
  update_young_list_max_and_target_length();
  update_rs_lengths_prediction();
  cset_chooser()->clear();

  // Old-gen allocation tracking restarts from zero after the compaction.
  _bytes_allocated_in_old_since_last_gc = 0;

  record_pause(FullGC, _full_collection_start_sec, end_sec);
}
841
// Prepare policy state at the start of an evacuation pause (young/mixed GC).
void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start. so, no point is calculating this
  // every time we calculate / recalculate the target young length.
  update_survivors_policy();

  assert(_g1->used() == _g1->recalculate_used(),
         "sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT,
         _g1->used(), _g1->recalculate_used());

  phase_times()->record_cur_collection_start_sec(start_time_sec);
  _pending_cards = _g1->pending_card_num();

  // Reset per-pause accounting.
  _collection_set_bytes_used_before = 0;
  _bytes_copied_during_gc = 0;

  collector_state()->set_last_gc_was_young(false);

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert( verify_young_ages(), "region age verification" );
}
866
867 void G1CollectorPolicy::record_concurrent_mark_init_end(double
868 mark_init_elapsed_time_ms) {
869 collector_state()->set_during_marking(true);
870 assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
871 collector_state()->set_during_initial_mark_pause(false);
883 _prev_collection_pause_end_ms += elapsed_time_ms;
884
885 record_pause(Remark, _mark_remark_start_sec, end_time_sec);
886 }
887
888 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
889 _mark_cleanup_start_sec = os::elapsedTime();
890 }
891
892 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
893 bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc",
894 "skip last young-only gc");
895 collector_state()->set_last_young_gc(should_continue_with_reclaim);
896 // We skip the marking phase.
897 if (!should_continue_with_reclaim) {
898 abort_time_to_mixed_tracking();
899 }
900 collector_state()->set_in_marking_window(false);
901 }
902
903 double G1CollectorPolicy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
904 return phase_times()->average_time_ms(phase);
905 }
906
907 double G1CollectorPolicy::young_other_time_ms() const {
908 return phase_times()->young_cset_choice_time_ms() +
909 phase_times()->young_free_cset_time_ms();
910 }
911
912 double G1CollectorPolicy::non_young_other_time_ms() const {
913 return phase_times()->non_young_cset_choice_time_ms() +
914 phase_times()->non_young_free_cset_time_ms();
915
916 }
917
918 double G1CollectorPolicy::other_time_ms(double pause_time_ms) const {
919 return pause_time_ms -
920 average_time_ms(G1GCPhaseTimes::UpdateRS) -
921 average_time_ms(G1GCPhaseTimes::ScanRS) -
922 average_time_ms(G1GCPhaseTimes::ObjCopy) -
967 NOT_PRODUCT(_short_lived_surv_rate_group->print());
968
969 record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);
970
971 last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
972 if (last_pause_included_initial_mark) {
973 record_concurrent_mark_init_end(0.0);
974 } else {
975 maybe_start_marking();
976 }
977
978 double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
979 if (app_time_ms < MIN_TIMER_GRANULARITY) {
980 // This usually happens due to the timer not having the required
981 // granularity. Some Linuxes are the usual culprits.
982 // We'll just set it to something (arbitrarily) small.
983 app_time_ms = 1.0;
984 }
985
986 if (update_stats) {
987 // We maintain the invariant that all objects allocated by mutator
988 // threads will be allocated out of eden regions. So, we can use
989 // the eden region number allocated since the previous GC to
990 // calculate the application's allocate rate. The only exception
991 // to that is humongous objects that are allocated separately. But
992 // given that humongous object allocations do not really affect
993 // either the pause's duration nor when the next pause will take
994 // place we can safely ignore them here.
995 uint regions_allocated = eden_cset_region_length();
996 double alloc_rate_ms = (double) regions_allocated / app_time_ms;
997 _alloc_rate_ms_seq->add(alloc_rate_ms);
998
999 double interval_ms =
1000 (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
1001 update_recent_gc_times(end_time_sec, pause_time_ms);
1002 _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
1003 if (recent_avg_pause_time_ratio() < 0.0 ||
1004 (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
1005 // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
1006 // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
1556 expand_bytes = MAX2(expand_bytes, min_expand_bytes);
1557 expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
1558
1559 clear_ratio_check_data();
1560 } else {
1561 // An expansion was not triggered. If we've started counting, increment
1562 // the number of checks we've made in the current window. If we've
1563 // reached the end of the window without resizing, clear the counters to
1564 // start again the next time we see a ratio above the threshold.
1565 if (_ratio_over_threshold_count > 0) {
1566 _pauses_since_start++;
1567 if (_pauses_since_start > NumPrevPausesForHeuristics) {
1568 clear_ratio_check_data();
1569 }
1570 }
1571 }
1572
1573 return expand_bytes;
1574 }
1575
// Print the survivor-rate summary for the short-lived group.
// Debug-only diagnostics; compiled out in PRODUCT builds.
void G1CollectorPolicy::print_yg_surv_rate_info() const {
#ifndef PRODUCT
  _short_lived_surv_rate_group->print_surv_rate_summary();
  // add this call for any other surv rate groups
#endif // PRODUCT
}
1582
1583 bool G1CollectorPolicy::is_young_list_full() const {
1584 uint young_list_length = _g1->young_list()->length();
1585 uint young_list_target_length = _young_list_target_length;
1586 return young_list_length >= young_list_target_length;
1587 }
1588
1589 bool G1CollectorPolicy::can_expand_young_list() const {
1590 uint young_list_length = _g1->young_list()->length();
1591 uint young_list_max_length = _young_list_max_length;
1592 return young_list_length < young_list_max_length;
1593 }
1594
1595 void G1CollectorPolicy::update_max_gc_locker_expansion() {
2083
2084
2085 double G1CollectorPolicy::finalize_young_cset_part(double target_pause_time_ms) {
2086 double young_start_time_sec = os::elapsedTime();
2087
2088 YoungList* young_list = _g1->young_list();
2089 finalize_incremental_cset_building();
2090
2091 guarantee(target_pause_time_ms > 0.0,
2092 "target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms);
2093 guarantee(_collection_set == NULL, "Precondition");
2094
2095 double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
2096 double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);
2097
2098 log_trace(gc, ergo, cset)("Start choosing CSet. pending cards: " SIZE_FORMAT " predicted base time: %1.2fms remaining time: %1.2fms target pause time: %1.2fms",
2099 _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
2100
2101 collector_state()->set_last_gc_was_young(collector_state()->gcs_are_young());
2102
2103 // The young list is laid with the survivor regions from the previous
2104 // pause are appended to the RHS of the young list, i.e.
2105 // [Newly Young Regions ++ Survivors from last pause].
2106
2107 uint survivor_region_length = young_list->survivor_length();
2108 uint eden_region_length = young_list->eden_length();
2109 init_cset_region_lengths(eden_region_length, survivor_region_length);
2110
2111 HeapRegion* hr = young_list->first_survivor_region();
2112 while (hr != NULL) {
2113 assert(hr->is_survivor(), "badly formed young list");
2114 // There is a convention that all the young regions in the CSet
2115 // are tagged as "eden", so we do this for the survivors here. We
2116 // use the special set_eden_pre_gc() as it doesn't check that the
2117 // region is free (which is not the case here).
2118 hr->set_eden_pre_gc();
2119 hr = hr->get_next_young_region();
2120 }
2121
2122 // Clear the fields that point to the survivor list - they are all young now.
2221
2222 if (expensive_region_num > 0) {
2223 // We print the information once here at the end, predicated on
2224 // whether we added any apparently expensive regions or not, to
2225 // avoid generating output per region.
2226 log_debug(gc, ergo, cset)("Added expensive regions to CSet (old CSet region num not reached min)."
2227 "old: %u regions, expensive: %u regions, min: %u regions, remaining time: %1.2fms",
2228 old_cset_region_length(), expensive_region_num, min_old_cset_length, time_remaining_ms);
2229 }
2230
2231 cset_chooser()->verify();
2232 }
2233
2234 stop_incremental_cset_building();
2235
2236 log_debug(gc, ergo, cset)("Finish choosing CSet. old: %u regions, predicted old region time: %1.2fms, time remaining: %1.2f",
2237 old_cset_region_length(), predicted_old_time_ms, time_remaining_ms);
2238
2239 double non_young_end_time_sec = os::elapsedTime();
2240 phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
2241 }
|