11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/g1/concurrentG1Refine.hpp"
27 #include "gc/g1/concurrentMark.hpp"
28 #include "gc/g1/concurrentMarkThread.inline.hpp"
29 #include "gc/g1/g1CollectedHeap.inline.hpp"
30 #include "gc/g1/g1CollectorPolicy.hpp"
31 #include "gc/g1/g1ErgoVerbose.hpp"
32 #include "gc/g1/g1GCPhaseTimes.hpp"
33 #include "gc/g1/g1Log.hpp"
34 #include "gc/g1/heapRegion.inline.hpp"
35 #include "gc/g1/heapRegionRemSet.hpp"
36 #include "gc/shared/gcPolicyCounters.hpp"
37 #include "runtime/arguments.hpp"
38 #include "runtime/java.hpp"
39 #include "runtime/mutexLocker.hpp"
40 #include "utilities/debug.hpp"
41
42 // Different defaults for different numbers of GC threads
43 // They were chosen by running GCOld and SPECjbb on debris with different
44 // numbers of GC threads and selecting the values that gave the best results.
45
46 // all the same
47 static double rs_length_diff_defaults[] = {
48 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
49 };
50
131 _collection_set(NULL),
132 _collection_set_bytes_used_before(0),
133
134 // Incremental CSet attributes
135 _inc_cset_build_state(Inactive),
136 _inc_cset_head(NULL),
137 _inc_cset_tail(NULL),
138 _inc_cset_bytes_used_before(0),
139 _inc_cset_max_finger(NULL),
140 _inc_cset_recorded_rs_lengths(0),
141 _inc_cset_recorded_rs_lengths_diffs(0),
142 _inc_cset_predicted_elapsed_time_ms(0.0),
143 _inc_cset_predicted_elapsed_time_ms_diffs(0.0),
144
145 // add here any more surv rate groups
146 _recorded_survivor_regions(0),
147 _recorded_survivor_head(NULL),
148 _recorded_survivor_tail(NULL),
149 _survivors_age_table(true),
150
151 _gc_overhead_perc(0.0) {
152
153 // SurvRateGroups below must be initialized after the predictor because they
154 // indirectly use it through this object passed to their constructor.
155 _short_lived_surv_rate_group =
156 new SurvRateGroup(&_predictor, "Short Lived", G1YoungSurvRateNumRegionsSummary);
157 _survivor_surv_rate_group =
158 new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary);
159
160 // Set up the region size and associated fields. Given that the
161 // policy is created before the heap, we have to set this up here,
162 // so it's done as soon as possible.
163
164 // It would have been natural to pass initial_heap_byte_size() and
165 // max_heap_byte_size() to setup_heap_region_size() but those have
166 // not been set up at this point since they should be aligned with
167 // the region size. So, there is a circular dependency here. We base
168 // the region size on the heap size, but the heap size should be
169 // aligned with the region size. To get around this we use the
170 // unaligned values for the heap.
171 HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
271 assert(GCTimeRatio > 0,
272 "we should have set it to a default value set_g1_gc_flags() "
273 "if a user set it to 0");
274 _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
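// Worked example (assuming G1's usual default of GCTimeRatio = 9, set in
// set_g1_gc_flags()): _gc_overhead_perc = 100.0 * (1.0 / (1.0 + 9)) = 10.0,
// i.e. the policy aims to spend at most about 10% of total time in GC.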
275
276 uintx reserve_perc = G1ReservePercent;
277 // Put an artificial ceiling on this so that it's not set to a silly value.
278 if (reserve_perc > 50) {
279 reserve_perc = 50;
280 warning("G1ReservePercent is set to a value that is too large, "
281 "it's been updated to " UINTX_FORMAT, reserve_perc);
282 }
283 _reserve_factor = (double) reserve_perc / 100.0;
284 // This will be set when the heap is expanded
285 // for the first time during initialization.
286 _reserve_regions = 0;
287
288 _collectionSetChooser = new CollectionSetChooser();
289 }
290
291 double G1CollectorPolicy::get_new_prediction(TruncatedSeq const* seq) const {
292 return _predictor.get_new_prediction(seq);
293 }
294
295 void G1CollectorPolicy::initialize_alignments() {
296 _space_alignment = HeapRegion::GrainBytes;
297 size_t card_table_alignment = CardTableRS::ct_max_alignment_constraint();
298 size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
299 _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size);
300 }
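// Illustrative values for the MAX3 above: with 1M heap regions
// (_space_alignment), a card table constraint of, say, 1M and a 2M large
// page size, _heap_alignment becomes 2M, so the heap size ends up a
// multiple of the region size, the card table granularity and the page size.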
301
302 void G1CollectorPolicy::initialize_flags() {
303 if (G1HeapRegionSize != HeapRegion::GrainBytes) {
304 FLAG_SET_ERGO(size_t, G1HeapRegionSize, HeapRegion::GrainBytes);
305 }
306
307 if (SurvivorRatio < 1) {
308 vm_exit_during_initialization("Invalid survivor ratio specified");
309 }
310 CollectorPolicy::initialize_flags();
311 _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
312 }
313
314 void G1CollectorPolicy::post_heap_initialize() {
315 uintx max_regions = G1CollectedHeap::heap()->max_regions();
316 size_t max_young_size = (size_t)_young_gen_sizer->max_young_length(max_regions) * HeapRegion::GrainBytes;
317 if (max_young_size != MaxNewSize) {
318 FLAG_SET_ERGO(size_t, MaxNewSize, max_young_size);
319 }
320 }
321
322 G1CollectorState* G1CollectorPolicy::collector_state() const { return _g1->collector_state(); }
323
324 G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true),
325 _min_desired_young_length(0), _max_desired_young_length(0) {
326 if (FLAG_IS_CMDLINE(NewRatio)) {
327 if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
328 warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
329 } else {
330 _sizer_kind = SizerNewRatio;
331 _adaptive_size = false;
332 return;
333 }
334 }
335
336 if (NewSize > MaxNewSize) {
337 if (FLAG_IS_CMDLINE(MaxNewSize)) {
338 warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). "
339 "A new max generation size of " SIZE_FORMAT "k will be used.",
494 double now_sec = os::elapsedTime();
495 double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
496 double alloc_rate_ms = predict_alloc_rate_ms();
497 desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
498 } else {
499 // otherwise we don't have enough info to make the prediction
500 }
501 }
502 desired_min_length += base_min_length;
503 // make sure we don't go below any user-defined minimum bound
504 return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
505 }
506
507 uint G1CollectorPolicy::calculate_young_list_desired_max_length() const {
508 // Here, we might want to also take into account any additional
509 // constraints (i.e., user-defined minimum bound). Currently, we
510 // effectively don't set this bound.
511 return _young_gen_sizer->max_desired_young_length();
512 }
513
514 void G1CollectorPolicy::update_young_list_max_and_target_length() {
515 update_young_list_max_and_target_length(get_new_prediction(_rs_lengths_seq));
516 }
517
518 void G1CollectorPolicy::update_young_list_max_and_target_length(size_t rs_lengths) {
519 update_young_list_target_length(rs_lengths);
520 update_max_gc_locker_expansion();
521 }
522
523 void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
524 _young_list_target_length = bounded_young_list_target_length(rs_lengths);
525 }
526
527 void G1CollectorPolicy::update_young_list_target_length() {
528 update_young_list_target_length(get_new_prediction(_rs_lengths_seq));
529 }
530
531 uint G1CollectorPolicy::bounded_young_list_target_length(size_t rs_lengths) const {
532 // Calculate the absolute and desired min bounds.
533
534 // This is how many young regions we already have (currently: the survivors).
535 uint base_min_length = recorded_survivor_regions();
536 uint desired_min_length = calculate_young_list_desired_min_length(base_min_length);
537 // This is the absolute minimum young length. Ensure that we
538 // will at least have one eden region available for allocation.
539 uint absolute_min_length = base_min_length + MAX2(_g1->young_list()->eden_length(), (uint)1);
540 // If we shrank the young list target it should not shrink below the current size.
541 desired_min_length = MAX2(desired_min_length, absolute_min_length);
542 // Calculate the absolute and desired max bounds.
543
544 // We will try our best not to "eat" into the reserve.
545 uint absolute_max_length = 0;
546 if (_free_regions_at_end_of_collection > _reserve_regions) {
547 absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
548 }
549 uint desired_max_length = calculate_young_list_desired_max_length();
550 if (desired_max_length > absolute_max_length) {
551 desired_max_length = absolute_max_length;
552 }
553
554 uint young_list_target_length = 0;
555 if (adaptive_young_list_length()) {
556 if (collector_state()->gcs_are_young()) {
557 young_list_target_length =
558 calculate_young_list_target_length(rs_lengths,
559 base_min_length,
560 desired_min_length,
561 desired_max_length);
562 } else {
563 // Don't calculate anything and let the code below bound it to
564 // the desired_min_length, i.e., do the next GC as soon as
565 // possible to maximize how many old regions we can add to it.
566 }
567 } else {
568 // The user asked for a fixed young gen so we'll fix the young gen
569 // whether the next GC is young or mixed.
570 young_list_target_length = _young_list_fixed_length;
571 }
572
573 // Make sure we don't go over the desired max length, nor under the
574 // desired min length. In case they clash, desired_min_length wins,
575 // which is why that test is second.
576 if (young_list_target_length > desired_max_length) {
577 young_list_target_length = desired_max_length;
578 }
579 if (young_list_target_length < desired_min_length) {
580 young_list_target_length = desired_min_length;
581 }
582
583 assert(young_list_target_length > recorded_survivor_regions(),
584 "we should be able to allocate at least one eden region");
585 assert(young_list_target_length >= absolute_min_length, "post-condition");
586
587 return young_list_target_length;
588 }
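// Worked example of the clamping above (illustrative numbers): with
// desired_min_length = 20, desired_max_length = 15 and an adaptive target
// of 30, the target is first capped to 15 and then raised back to 20;
// the desired minimum deliberately wins when the two bounds clash.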
589
590 uint
591 G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
592 uint base_min_length,
810 // "Nuke" the heuristics that control the young/mixed GC
811 // transitions and make sure we start with young GCs after the Full GC.
812 collector_state()->set_gcs_are_young(true);
813 collector_state()->set_last_young_gc(false);
814 collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
815 collector_state()->set_during_initial_mark_pause(false);
816 collector_state()->set_in_marking_window(false);
817 collector_state()->set_in_marking_window_im(false);
818
819 _short_lived_surv_rate_group->start_adding_regions();
820 // also call this on any additional surv rate groups
821
822 record_survivor_regions(0, NULL, NULL);
823
824 _free_regions_at_end_of_collection = _g1->num_free_regions();
825 // Reset survivors SurvRateGroup.
826 _survivor_surv_rate_group->reset();
827 update_young_list_max_and_target_length();
828 update_rs_lengths_prediction();
829 _collectionSetChooser->clear();
830 }
831
832 void G1CollectorPolicy::record_stop_world_start() {
833 _stop_world_start = os::elapsedTime();
834 }
835
836 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
837 // We only need to do this here as the policy will only be applied
838 // to the GC we're about to start. So, there is no point in calculating this
839 // every time we calculate / recalculate the target young length.
840 update_survivors_policy();
841
842 assert(_g1->used() == _g1->recalculate_used(),
843 "sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT,
844 _g1->used(), _g1->recalculate_used());
845
846 double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
847 _trace_young_gen_time_data.record_start_collection(s_w_t_ms);
848 _stop_world_start = 0.0;
849
867 void G1CollectorPolicy::record_concurrent_mark_init_end(double
868 mark_init_elapsed_time_ms) {
869 collector_state()->set_during_marking(true);
870 assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
871 collector_state()->set_during_initial_mark_pause(false);
872 _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
873 }
874
875 void G1CollectorPolicy::record_concurrent_mark_remark_start() {
876 _mark_remark_start_sec = os::elapsedTime();
877 collector_state()->set_during_marking(false);
878 }
879
880 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
881 double end_time_sec = os::elapsedTime();
882 double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
883 _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
884 _cur_mark_stop_world_time_ms += elapsed_time_ms;
885 _prev_collection_pause_end_ms += elapsed_time_ms;
886
887 _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec);
888 }
889
890 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
891 _mark_cleanup_start_sec = os::elapsedTime();
892 }
893
894 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
895 bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc",
896 "skip last young-only gc");
897 collector_state()->set_last_young_gc(should_continue_with_reclaim);
898 collector_state()->set_in_marking_window(false);
899 }
900
901 void G1CollectorPolicy::record_concurrent_pause() {
902 if (_stop_world_start > 0.0) {
903 double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
904 _trace_young_gen_time_data.record_yield_time(yield_ms);
905 }
906 }
907
908 double G1CollectorPolicy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
909 return phase_times()->average_time_ms(phase);
910 }
911
912 double G1CollectorPolicy::young_other_time_ms() const {
913 return phase_times()->young_cset_choice_time_ms() +
914 phase_times()->young_free_cset_time_ms();
915 }
916
917 double G1CollectorPolicy::non_young_other_time_ms() const {
924 return pause_time_ms -
925 average_time_ms(G1GCPhaseTimes::UpdateRS) -
926 average_time_ms(G1GCPhaseTimes::ScanRS) -
927 average_time_ms(G1GCPhaseTimes::ObjCopy) -
928 average_time_ms(G1GCPhaseTimes::Termination);
929 }
930
931 double G1CollectorPolicy::constant_other_time_ms(double pause_time_ms) const {
932 return other_time_ms(pause_time_ms) - young_other_time_ms() - non_young_other_time_ms();
933 }
934
935 bool G1CollectorPolicy::about_to_start_mixed_phase() const {
936 return _g1->concurrent_mark()->cmThread()->during_cycle() || collector_state()->last_young_gc();
937 }
938
939 bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
940 if (about_to_start_mixed_phase()) {
941 return false;
942 }
943
944 size_t marking_initiating_used_threshold =
945 (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
946 size_t cur_used_bytes = _g1->non_young_capacity_bytes();
947 size_t alloc_byte_size = alloc_word_size * HeapWordSize;
948
949 if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
950 if (collector_state()->gcs_are_young() && !collector_state()->last_young_gc()) {
951 ergo_verbose5(ErgoConcCycles,
952 "request concurrent cycle initiation",
953 ergo_format_reason("occupancy higher than threshold")
954 ergo_format_byte("occupancy")
955 ergo_format_byte("allocation request")
956 ergo_format_byte_perc("threshold")
957 ergo_format_str("source"),
958 cur_used_bytes,
959 alloc_byte_size,
960 marking_initiating_used_threshold,
961 (double) InitiatingHeapOccupancyPercent,
962 source);
963 return true;
964 } else {
965 ergo_verbose5(ErgoConcCycles,
966 "do not request concurrent cycle initiation",
967 ergo_format_reason("still doing mixed collections")
968 ergo_format_byte("occupancy")
969 ergo_format_byte("allocation request")
970 ergo_format_byte_perc("threshold")
971 ergo_format_str("source"),
972 cur_used_bytes,
973 alloc_byte_size,
974 marking_initiating_used_threshold,
975 (double) InitiatingHeapOccupancyPercent,
976 source);
977 }
978 }
979
980 return false;
981 }
982
983 // Anything below this threshold is considered to be zero.
984 #define MIN_TIMER_GRANULARITY 0.0000001
985
986 void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned) {
987 double end_time_sec = os::elapsedTime();
988 assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
989 "otherwise, the subtraction below does not make sense");
990 size_t rs_size =
991 _cur_collection_pause_used_regions_at_start - cset_region_length();
992 size_t cur_used_bytes = _g1->used();
993 assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
994 bool last_pause_included_initial_mark = false;
995 bool update_stats = !_g1->evacuation_failed();
996
997 #ifndef PRODUCT
998 if (G1YoungSurvRateVerbose) {
999 gclog_or_tty->cr();
1000 _short_lived_surv_rate_group->print();
1001 // do that for any other surv rate groups too
1002 }
1003 #endif // PRODUCT
1004
1005 last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
1006 if (last_pause_included_initial_mark) {
1007 record_concurrent_mark_init_end(0.0);
1008 } else {
1009 maybe_start_marking();
1010 }
1011
1012 _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0, end_time_sec);
1013
1014 if (update_stats) {
1015 _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
1016 // this is where we update the allocation rate of the application
1017 double app_time_ms =
1018 (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
1019 if (app_time_ms < MIN_TIMER_GRANULARITY) {
1020 // This usually happens due to the timer not having the required
1021 // granularity. Some Linuxes are the usual culprits.
1022 // We'll just set it to something (arbitrarily) small.
1023 app_time_ms = 1.0;
1024 }
1025 // We maintain the invariant that all objects allocated by mutator
1026 // threads will be allocated out of eden regions. So, we can use
1027 // the number of eden regions allocated since the previous GC to
1028 // calculate the application's allocation rate. The only exception
1029 // to that is humongous objects, which are allocated separately. But
1030 // given that humongous object allocations do not really affect
1031 // either the pause's duration or when the next pause will take
1032 // place, we can safely ignore them here.
1033 uint regions_allocated = eden_cset_region_length();
1034 double alloc_rate_ms = (double) regions_allocated / app_time_ms;
1035 _alloc_rate_ms_seq->add(alloc_rate_ms);
1036
1037 double interval_ms =
1062 _recent_avg_pause_time_ratio = 1.0;
1063 }
1064 }
1065 }
1066
1067 bool new_in_marking_window = collector_state()->in_marking_window();
1068 bool new_in_marking_window_im = false;
1069 if (last_pause_included_initial_mark) {
1070 new_in_marking_window = true;
1071 new_in_marking_window_im = true;
1072 }
1073
1074 if (collector_state()->last_young_gc()) {
1075 // This is supposed to be the "last young GC" before we start
1076 // doing mixed GCs. Here we decide whether to start mixed GCs or not.
1077 assert(!last_pause_included_initial_mark, "The last young GC is not allowed to be an initial mark GC");
1078
1079 if (next_gc_should_be_mixed("start mixed GCs",
1080 "do not start mixed GCs")) {
1081 collector_state()->set_gcs_are_young(false);
1082 }
1083
1084 collector_state()->set_last_young_gc(false);
1085 }
1086
1087 if (!collector_state()->last_gc_was_young()) {
1088 // This is a mixed GC. Here we decide whether to continue doing
1089 // mixed GCs or not.
1090
1091 if (!next_gc_should_be_mixed("continue mixed GCs",
1092 "do not continue mixed GCs")) {
1093 collector_state()->set_gcs_are_young(true);
1094
1095 maybe_start_marking();
1096 }
1097 }
1098
1099 _short_lived_surv_rate_group->start_adding_regions();
1100 // Do that for any other surv rate groups
1101
1102 if (update_stats) {
1103 double cost_per_card_ms = 0.0;
1104 double cost_scan_hcc = average_time_ms(G1GCPhaseTimes::ScanHCC);
1105 if (_pending_cards > 0) {
1106 cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - cost_scan_hcc) / (double) _pending_cards;
1107 _cost_per_card_ms_seq->add(cost_per_card_ms);
1108 }
1109 _cost_scan_hcc_seq->add(cost_scan_hcc);
1110
1162
1163 if (young_cset_region_length() > 0) {
1164 _young_other_cost_per_region_ms_seq->add(young_other_time_ms() /
1165 young_cset_region_length());
1166 }
1167
1168 if (old_cset_region_length() > 0) {
1169 _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms() /
1170 old_cset_region_length());
1171 }
1172
1173 _constant_other_time_ms_seq->add(constant_other_time_ms(pause_time_ms));
1174
1175 _pending_cards_seq->add((double) _pending_cards);
1176 _rs_lengths_seq->add((double) _max_rs_lengths);
1177 }
1178
1179 collector_state()->set_in_marking_window(new_in_marking_window);
1180 collector_state()->set_in_marking_window_im(new_in_marking_window_im);
1181 _free_regions_at_end_of_collection = _g1->num_free_regions();
1182 update_young_list_max_and_target_length();
1183 update_rs_lengths_prediction();
1184
1185 // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
1186 double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
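// Worked example (assuming G1's default MaxGCPauseMillis = 200 and
// G1RSetUpdatingPauseTimePercent = 10): max_gc_time() is 0.2 s, so
// update_rs_time_goal_ms = 0.2 * 1000 * 10 / 100 = 20 ms of Update RS work.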
1187
1188 double scan_hcc_time_ms = average_time_ms(G1GCPhaseTimes::ScanHCC);
1189
1190 if (update_rs_time_goal_ms < scan_hcc_time_ms) {
1191 ergo_verbose2(ErgoTiming,
1192 "adjust concurrent refinement thresholds",
1193 ergo_format_reason("Scanning the HCC expected to take longer than Update RS time goal")
1194 ergo_format_ms("Update RS time goal")
1195 ergo_format_ms("Scan HCC time"),
1196 update_rs_time_goal_ms,
1197 scan_hcc_time_ms);
1198
1199 update_rs_time_goal_ms = 0;
1200 } else {
1201 update_rs_time_goal_ms -= scan_hcc_time_ms;
1202 }
1203 adjust_concurrent_refinement(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
1204 phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
1205 update_rs_time_goal_ms);
1206
1207 _collectionSetChooser->verify();
1208 }
1209
1210 #define EXT_SIZE_FORMAT "%.1f%s"
1211 #define EXT_SIZE_PARAMS(bytes) \
1212 byte_size_in_proper_unit((double)(bytes)), \
1213 proper_unit_for_byte_size((bytes))
1214
1215 void G1CollectorPolicy::record_heap_size_info_at_start(bool full) {
1216 YoungList* young_list = _g1->young_list();
1217 _eden_used_bytes_before_gc = young_list->eden_used_bytes();
1218 _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
1219 _heap_capacity_bytes_before_gc = _g1->capacity();
1220 _heap_used_bytes_before_gc = _g1->used();
1221 _cur_collection_pause_used_regions_at_start = _g1->num_used_regions();
1222
1223 _eden_capacity_bytes_before_gc =
1224 (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
1225
1226 if (full) {
1227 _metaspace_used_bytes_before_gc = MetaspaceAux::used_bytes();
1228 }
1229 }
1702
1703 public:
1704 ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size, uint n_workers) :
1705 AbstractGangTask("ParKnownGarbageTask"),
1706 _hrSorted(hrSorted), _chunk_size(chunk_size),
1707 _g1(G1CollectedHeap::heap()), _hrclaimer(n_workers) {}
1708
1709 void work(uint worker_id) {
1710 ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);
1711 _g1->heap_region_par_iterate(&parKnownGarbageCl, worker_id, &_hrclaimer);
1712 }
1713 };
1714
1715 uint G1CollectorPolicy::calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const {
1716 assert(n_workers > 0, "Active gc workers should be greater than 0");
1717 const uint overpartition_factor = 4;
1718 const uint min_chunk_size = MAX2(n_regions / n_workers, 1U);
1719 return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size);
1720 }
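// Worked example (illustrative): for n_regions = 2048 and n_workers = 8,
// min_chunk_size = MAX2(2048 / 8, 1U) = 256 and the result is
// MAX2(2048 / (8 * 4), 256) = 256, i.e. with these inputs the per-worker
// share, not the overpartitioned value, determines the chunk size.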
1721
1722 void
1723 G1CollectorPolicy::record_concurrent_mark_cleanup_end() {
1724 _collectionSetChooser->clear();
1725
1726 WorkGang* workers = _g1->workers();
1727 uint n_workers = workers->active_workers();
1728
1729 uint n_regions = _g1->num_regions();
1730 uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions);
1731 _collectionSetChooser->prepare_for_par_region_addition(n_workers, n_regions, chunk_size);
1732 ParKnownGarbageTask par_known_garbage_task(_collectionSetChooser, chunk_size, n_workers);
1733 workers->run_task(&par_known_garbage_task);
1734
1735 _collectionSetChooser->sort_regions();
1736
1737 double end_sec = os::elapsedTime();
1738 double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
1739 _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
1740 _cur_mark_stop_world_time_ms += elapsed_time_ms;
1741 _prev_collection_pause_end_ms += elapsed_time_ms;
1742 _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec);
1743 }
1744
1745 // Add the heap region at the head of the non-incremental collection set
1746 void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
1747 assert(_inc_cset_build_state == Active, "Precondition");
1748 assert(hr->is_old(), "the region should be old");
1749
1750 assert(!hr->in_collection_set(), "should not already be in the CSet");
1751 _g1->register_old_region_with_cset(hr);
1752 hr->set_next_in_collection_set(_collection_set);
1753 _collection_set = hr;
1754 _collection_set_bytes_used_before += hr->used();
1755 size_t rs_length = hr->rem_set()->occupied();
1756 _recorded_rs_lengths += rs_length;
1757 _old_cset_region_length += 1;
1758 }
1759
1760 // Initialize the per-collection-set information
1761 void G1CollectorPolicy::start_incremental_cset_building() {
1762 assert(_inc_cset_build_state == Inactive, "Precondition");
1936 csr = next;
1937 }
1938 }
1939 #endif // !PRODUCT
1940
1941 double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) const {
1942 // Returns the given amount of reclaimable bytes (that represents
1943 // the amount of reclaimable space still to be collected) as a
1944 // percentage of the current heap capacity.
1945 size_t capacity_bytes = _g1->capacity();
1946 return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
1947 }
1948
1949 void G1CollectorPolicy::maybe_start_marking() {
1950 if (need_to_start_conc_mark("end of GC")) {
1951 // Note: this might have already been set, if during the last
1952 // pause we decided to start a cycle but at the beginning of
1953 // this pause we decided to postpone it. That's OK.
1954 collector_state()->set_initiate_conc_mark_if_possible(true);
1955 }
1956 }
1957
1958 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
1959 const char* false_action_str) const {
1960 CollectionSetChooser* cset_chooser = _collectionSetChooser;
1961 if (cset_chooser->is_empty()) {
1962 ergo_verbose0(ErgoMixedGCs,
1963 false_action_str,
1964 ergo_format_reason("candidate old regions not available"));
1965 return false;
1966 }
1967
1968 // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
1969 size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
1970 double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
1971 double threshold = (double) G1HeapWastePercent;
1972 if (reclaimable_perc <= threshold) {
1973 ergo_verbose4(ErgoMixedGCs,
1974 false_action_str,
1975 ergo_format_reason("reclaimable percentage not over threshold")
|
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/g1/concurrentG1Refine.hpp"
27 #include "gc/g1/concurrentMark.hpp"
28 #include "gc/g1/concurrentMarkThread.inline.hpp"
29 #include "gc/g1/g1CollectedHeap.inline.hpp"
30 #include "gc/g1/g1CollectorPolicy.hpp"
31 #include "gc/g1/g1IHOPControl.hpp"
32 #include "gc/g1/g1ErgoVerbose.hpp"
33 #include "gc/g1/g1GCPhaseTimes.hpp"
34 #include "gc/g1/g1Log.hpp"
35 #include "gc/g1/heapRegion.inline.hpp"
36 #include "gc/g1/heapRegionRemSet.hpp"
37 #include "gc/shared/gcPolicyCounters.hpp"
38 #include "runtime/arguments.hpp"
39 #include "runtime/java.hpp"
40 #include "runtime/mutexLocker.hpp"
41 #include "utilities/debug.hpp"
42
43 // Different defaults for different numbers of GC threads
44 // They were chosen by running GCOld and SPECjbb on debris with different
45 // numbers of GC threads and selecting the values that gave the best results.
46
47 // all the same
48 static double rs_length_diff_defaults[] = {
49 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
50 };
51
132 _collection_set(NULL),
133 _collection_set_bytes_used_before(0),
134
135 // Incremental CSet attributes
136 _inc_cset_build_state(Inactive),
137 _inc_cset_head(NULL),
138 _inc_cset_tail(NULL),
139 _inc_cset_bytes_used_before(0),
140 _inc_cset_max_finger(NULL),
141 _inc_cset_recorded_rs_lengths(0),
142 _inc_cset_recorded_rs_lengths_diffs(0),
143 _inc_cset_predicted_elapsed_time_ms(0.0),
144 _inc_cset_predicted_elapsed_time_ms_diffs(0.0),
145
146 // add here any more surv rate groups
147 _recorded_survivor_regions(0),
148 _recorded_survivor_head(NULL),
149 _recorded_survivor_tail(NULL),
150 _survivors_age_table(true),
151
152 _gc_overhead_perc(0.0),
153
154 _last_old_allocated_bytes(0),
155 _ihop_control(NULL),
156 _initial_mark_to_mixed() {
157
158 // SurvRateGroups below must be initialized after the predictor because they
159 // indirectly use it through this object passed to their constructor.
160 _short_lived_surv_rate_group =
161 new SurvRateGroup(&_predictor, "Short Lived", G1YoungSurvRateNumRegionsSummary);
162 _survivor_surv_rate_group =
163 new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary);
164
165 // Set up the region size and associated fields. Given that the
166 // policy is created before the heap, we have to set this up here,
167 // so it's done as soon as possible.
168
169 // It would have been natural to pass initial_heap_byte_size() and
170 // max_heap_byte_size() to setup_heap_region_size() but those have
171 // not been set up at this point since they should be aligned with
172 // the region size. So, there is a circular dependency here. We base
173 // the region size on the heap size, but the heap size should be
174 // aligned with the region size. To get around this we use the
175 // unaligned values for the heap.
176 HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
276 assert(GCTimeRatio > 0,
277 "we should have set it to a default value set_g1_gc_flags() "
278 "if a user set it to 0");
279 _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
280
281 uintx reserve_perc = G1ReservePercent;
282 // Put an artificial ceiling on this so that it's not set to a silly value.
283 if (reserve_perc > 50) {
284 reserve_perc = 50;
285 warning("G1ReservePercent is set to a value that is too large, "
286 "it's been updated to " UINTX_FORMAT, reserve_perc);
287 }
288 _reserve_factor = (double) reserve_perc / 100.0;
289 // This will be set when the heap is expanded
290 // for the first time during initialization.
291 _reserve_regions = 0;
292
293 _collectionSetChooser = new CollectionSetChooser();
294 }
295
296 G1CollectorPolicy::~G1CollectorPolicy() {
297 if (_ihop_control != NULL) {
298 delete _ihop_control;
299 }
300 }
301
302 double G1CollectorPolicy::get_new_prediction(TruncatedSeq const* seq) const {
303 return _predictor.get_new_prediction(seq);
304 }
305
306 void G1CollectorPolicy::initialize_alignments() {
307 _space_alignment = HeapRegion::GrainBytes;
308 size_t card_table_alignment = CardTableRS::ct_max_alignment_constraint();
309 size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
310 _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size);
311 }
312
313 void G1CollectorPolicy::initialize_flags() {
314 if (G1HeapRegionSize != HeapRegion::GrainBytes) {
315 FLAG_SET_ERGO(size_t, G1HeapRegionSize, HeapRegion::GrainBytes);
316 }
317
318 if (SurvivorRatio < 1) {
319 vm_exit_during_initialization("Invalid survivor ratio specified");
320 }
321 CollectorPolicy::initialize_flags();
322 _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
323 }
324
325 void G1CollectorPolicy::post_heap_initialize() {
326 uintx max_regions = G1CollectedHeap::heap()->max_regions();
327 size_t max_young_size = (size_t)_young_gen_sizer->max_young_length(max_regions) * HeapRegion::GrainBytes;
328 if (max_young_size != MaxNewSize) {
329 FLAG_SET_ERGO(size_t, MaxNewSize, max_young_size);
330 }
331
332 _ihop_control = create_ihop_control();
333 }
334
335 G1CollectorState* G1CollectorPolicy::collector_state() const { return _g1->collector_state(); }
336
337 G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true),
338 _min_desired_young_length(0), _max_desired_young_length(0) {
339 if (FLAG_IS_CMDLINE(NewRatio)) {
340 if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
341 warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
342 } else {
343 _sizer_kind = SizerNewRatio;
344 _adaptive_size = false;
345 return;
346 }
347 }
348
349 if (NewSize > MaxNewSize) {
350 if (FLAG_IS_CMDLINE(MaxNewSize)) {
351 warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). "
352 "A new max generation size of " SIZE_FORMAT "k will be used.",
507 double now_sec = os::elapsedTime();
508 double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
509 double alloc_rate_ms = predict_alloc_rate_ms();
510 desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
511 } else {
512 // otherwise we don't have enough info to make the prediction
513 }
514 }
515 desired_min_length += base_min_length;
516 // make sure we don't go below any user-defined minimum bound
517 return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
518 }
519
520 uint G1CollectorPolicy::calculate_young_list_desired_max_length() const {
521 // Here, we might want to also take into account any additional
522 // constraints (i.e., user-defined minimum bound). Currently, we
523 // effectively don't set this bound.
524 return _young_gen_sizer->max_desired_young_length();
525 }
526
527 void G1CollectorPolicy::update_young_list_max_and_target_length(size_t* unbounded_target_length) {
528 update_young_list_max_and_target_length(get_new_prediction(_rs_lengths_seq), unbounded_target_length);
529 }
530
531 void G1CollectorPolicy::update_young_list_max_and_target_length(size_t rs_lengths, size_t* unbounded_target_length) {
532 update_young_list_target_length(rs_lengths, unbounded_target_length);
533 update_max_gc_locker_expansion();
534 }
535
536 void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths, size_t* unbounded_target_length) {
537 _young_list_target_length = bounded_young_list_target_length(rs_lengths, unbounded_target_length);
538 }
539
540 void G1CollectorPolicy::update_young_list_target_length() {
541 update_young_list_target_length(get_new_prediction(_rs_lengths_seq));
542 }
543
544 uint G1CollectorPolicy::bounded_young_list_target_length(size_t rs_lengths, size_t* unbounded_target_length) const {
545 // Calculate the absolute and desired min bounds.
546
547 // This is how many young regions we already have (currently: the survivors).
548 uint base_min_length = recorded_survivor_regions();
549 uint desired_min_length = calculate_young_list_desired_min_length(base_min_length);
550 // This is the absolute minimum young length. Ensure that we
551 // will at least have one eden region available for allocation.
552 uint absolute_min_length = base_min_length + MAX2(_g1->young_list()->eden_length(), (uint)1);
553 // If we shrank the young list target it should not shrink below the current size.
554 desired_min_length = MAX2(desired_min_length, absolute_min_length);
555 // Calculate the absolute and desired max bounds.
556
557 uint desired_max_length = calculate_young_list_desired_max_length();
558
559 uint young_list_target_length = 0;
560 if (adaptive_young_list_length()) {
561 if (collector_state()->gcs_are_young()) {
562 young_list_target_length =
563 calculate_young_list_target_length(rs_lengths,
564 base_min_length,
565 desired_min_length,
566 desired_max_length);
567 } else {
568 // Don't calculate anything and let the code below bound it to
569 // the desired_min_length, i.e., do the next GC as soon as
570 // possible to maximize how many old regions we can add to it.
571 }
572 } else {
573 // The user asked for a fixed young gen so we'll fix the young gen
574 // whether the next GC is young or mixed.
575 young_list_target_length = _young_list_fixed_length;
576 }
577
578 if (unbounded_target_length != NULL) {
579 *unbounded_target_length = young_list_target_length;
580 }
581
582 // We will try our best not to "eat" into the reserve.
583 uint absolute_max_length = 0;
584 if (_free_regions_at_end_of_collection > _reserve_regions) {
585 absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
586 }
587 if (desired_max_length > absolute_max_length) {
588 desired_max_length = absolute_max_length;
589 }
590
591 // Make sure we don't go over the desired max length, nor under the
592 // desired min length. In case they clash, desired_min_length wins,
593 // which is why that test is second.
594 if (young_list_target_length > desired_max_length) {
595 young_list_target_length = desired_max_length;
596 }
597 if (young_list_target_length < desired_min_length) {
598 young_list_target_length = desired_min_length;
599 }
600
601 assert(young_list_target_length > recorded_survivor_regions(),
602 "we should be able to allocate at least one eden region");
603 assert(young_list_target_length >= absolute_min_length, "post-condition");
604
605 return young_list_target_length;
606 }
607
608 uint
609 G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
610 uint base_min_length,
828 // "Nuke" the heuristics that control the young/mixed GC
829 // transitions and make sure we start with young GCs after the Full GC.
830 collector_state()->set_gcs_are_young(true);
831 collector_state()->set_last_young_gc(false);
832 collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
833 collector_state()->set_during_initial_mark_pause(false);
834 collector_state()->set_in_marking_window(false);
835 collector_state()->set_in_marking_window_im(false);
836
837 _short_lived_surv_rate_group->start_adding_regions();
838 // also call this on any additional surv rate groups
839
840 record_survivor_regions(0, NULL, NULL);
841
842 _free_regions_at_end_of_collection = _g1->num_free_regions();
843 // Reset survivors SurvRateGroup.
844 _survivor_surv_rate_group->reset();
845 update_young_list_max_and_target_length();
846 update_rs_lengths_prediction();
847 _collectionSetChooser->clear();
848
849 _last_old_allocated_bytes = 0;
850
851 record_pause(FullGC, _full_collection_start_sec, end_sec);
852 }
853
854 void G1CollectorPolicy::record_stop_world_start() {
855 _stop_world_start = os::elapsedTime();
856 }
857
858 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
859 // We only need to do this here as the policy will only be applied
860 // to the GC we're about to start. So, there is no point in calculating this
861 // every time we calculate / recalculate the target young length.
862 update_survivors_policy();
863
864 assert(_g1->used() == _g1->recalculate_used(),
865 "sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT,
866 _g1->used(), _g1->recalculate_used());
867
868 double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
869 _trace_young_gen_time_data.record_start_collection(s_w_t_ms);
870 _stop_world_start = 0.0;
871
889 void G1CollectorPolicy::record_concurrent_mark_init_end(double
890 mark_init_elapsed_time_ms) {
891 collector_state()->set_during_marking(true);
892 assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
893 collector_state()->set_during_initial_mark_pause(false);
894 _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
895 }
896
897 void G1CollectorPolicy::record_concurrent_mark_remark_start() {
898 _mark_remark_start_sec = os::elapsedTime();
899 collector_state()->set_during_marking(false);
900 }
901
902 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
903 double end_time_sec = os::elapsedTime();
904 double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
905 _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
906 _cur_mark_stop_world_time_ms += elapsed_time_ms;
907 _prev_collection_pause_end_ms += elapsed_time_ms;
908
909 record_pause(Remark, _mark_remark_start_sec, end_time_sec);
910 }
911
912 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
913 _mark_cleanup_start_sec = os::elapsedTime();
914 }
915
916 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
917 bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc",
918 "skip last young-only gc");
919 collector_state()->set_last_young_gc(should_continue_with_reclaim);
920 // If no mixed GCs will follow, abort the initial-mark-to-mixed time tracking.
921 if (!should_continue_with_reclaim) {
922 abort_time_to_mixed_tracking();
923 }
924 collector_state()->set_in_marking_window(false);
925 }
926
927 void G1CollectorPolicy::record_concurrent_pause() {
928 if (_stop_world_start > 0.0) {
929 double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
930 _trace_young_gen_time_data.record_yield_time(yield_ms);
931 }
932 }
933
934 double G1CollectorPolicy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
935 return phase_times()->average_time_ms(phase);
936 }
937
938 double G1CollectorPolicy::young_other_time_ms() const {
939 return phase_times()->young_cset_choice_time_ms() +
940 phase_times()->young_free_cset_time_ms();
941 }
942
943 double G1CollectorPolicy::non_young_other_time_ms() const {
950 return pause_time_ms -
951 average_time_ms(G1GCPhaseTimes::UpdateRS) -
952 average_time_ms(G1GCPhaseTimes::ScanRS) -
953 average_time_ms(G1GCPhaseTimes::ObjCopy) -
954 average_time_ms(G1GCPhaseTimes::Termination);
955 }
956
957 double G1CollectorPolicy::constant_other_time_ms(double pause_time_ms) const {
958 return other_time_ms(pause_time_ms) - young_other_time_ms() - non_young_other_time_ms();
959 }
960
961 bool G1CollectorPolicy::about_to_start_mixed_phase() const {
962 return _g1->concurrent_mark()->cmThread()->during_cycle() || collector_state()->last_young_gc();
963 }
964
965 bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
966 if (about_to_start_mixed_phase()) {
967 return false;
968 }
969
970 size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold();
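// With the static control created in create_ihop_control() below, this
// threshold is expected to be a fixed fraction of the heap, e.g. about
// 45% of a 4096M maximum capacity, i.e. roughly 1843M, assuming the
// default InitiatingHeapOccupancyPercent of 45 (illustrative numbers).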
971
972 size_t cur_used_bytes = _g1->non_young_capacity_bytes();
973 size_t alloc_byte_size = alloc_word_size * HeapWordSize;
974 size_t marking_request_bytes = cur_used_bytes + alloc_byte_size;
975
976 if (marking_request_bytes > marking_initiating_used_threshold) {
977 if (collector_state()->gcs_are_young() && !collector_state()->last_young_gc()) {
978 ergo_verbose5(ErgoConcCycles,
979 "request concurrent cycle initiation",
980 ergo_format_reason("occupancy higher than threshold")
981 ergo_format_byte("occupancy")
982 ergo_format_byte("allocation request")
983 ergo_format_byte_perc("threshold")
984 ergo_format_str("source"),
985 cur_used_bytes,
986 alloc_byte_size,
987 marking_initiating_used_threshold,
988 (double) marking_initiating_used_threshold / _g1->capacity() * 100,
989 source);
990 return true;
991 } else {
992 ergo_verbose5(ErgoConcCycles,
993 "do not request concurrent cycle initiation",
994 ergo_format_reason("still doing mixed collections")
995 ergo_format_byte("occupancy")
996 ergo_format_byte("allocation request")
997 ergo_format_byte_perc("threshold")
998 ergo_format_str("source"),
999 cur_used_bytes,
1000 alloc_byte_size,
1001 marking_initiating_used_threshold,
1002 (double) marking_initiating_used_threshold / _g1->capacity() * 100,
1003 source);
1004 }
1005 }
1006
1007 return false;
1008 }
1009
1010 // Anything below this threshold is considered to be zero.
1011 #define MIN_TIMER_GRANULARITY 0.0000001
1012
1013 void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned) {
1014 double end_time_sec = os::elapsedTime();
1015 assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
1016 "otherwise, the subtraction below does not make sense");
1017 size_t cur_used_bytes = _g1->used();
1018 assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
1019 bool last_pause_included_initial_mark = false;
1020 bool update_stats = !_g1->evacuation_failed();
1021
1022 #ifndef PRODUCT
1023 if (G1YoungSurvRateVerbose) {
1024 gclog_or_tty->cr();
1025 _short_lived_surv_rate_group->print();
1026 // do that for any other surv rate groups too
1027 }
1028 #endif // PRODUCT
1029
1030 record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);
1031
1032 last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
1033 if (last_pause_included_initial_mark) {
1034 record_concurrent_mark_init_end(0.0);
1035 } else {
1036 maybe_start_marking();
1037 }
1038
1039 double app_time_ms = 1.0;
1040
1041 if (update_stats) {
1042 _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
1043 // this is where we update the allocation rate of the application
1044 app_time_ms =
1045 (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
1046 if (app_time_ms < MIN_TIMER_GRANULARITY) {
1047 // This usually happens due to the timer not having the required
1048 // granularity. Some Linuxes are the usual culprits.
1049 // We'll just set it to something (arbitrarily) small.
1050 app_time_ms = 1.0;
1051 }
1052 // We maintain the invariant that all objects allocated by mutator
1053 // threads will be allocated out of eden regions. So, we can use
1054 // the number of eden regions allocated since the previous GC to
1055 // calculate the application's allocation rate. The only exception
1056 // to that is humongous objects, which are allocated separately. But
1057 // given that humongous object allocations do not really affect
1058 // either the pause's duration or when the next pause will take
1059 // place, we can safely ignore them here.
1060 uint regions_allocated = eden_cset_region_length();
1061 double alloc_rate_ms = (double) regions_allocated / app_time_ms;
1062 _alloc_rate_ms_seq->add(alloc_rate_ms);
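// Worked example (illustrative): 8 eden regions allocated over 2000 ms of
// mutator time gives alloc_rate_ms = 8 / 2000.0 = 0.004 regions/ms; this
// sequence presumably backs predict_alloc_rate_ms(), which is used when
// calculating the desired minimum young length.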
1063
1064 double interval_ms =
1089 _recent_avg_pause_time_ratio = 1.0;
1090 }
1091 }
1092 }
1093
1094 bool new_in_marking_window = collector_state()->in_marking_window();
1095 bool new_in_marking_window_im = false;
1096 if (last_pause_included_initial_mark) {
1097 new_in_marking_window = true;
1098 new_in_marking_window_im = true;
1099 }
1100
1101 if (collector_state()->last_young_gc()) {
1102 // This is supposed to be the "last young GC" before we start
1103 // doing mixed GCs. Here we decide whether to start mixed GCs or not.
1104 assert(!last_pause_included_initial_mark, "The last young GC is not allowed to be an initial mark GC");
1105
1106 if (next_gc_should_be_mixed("start mixed GCs",
1107 "do not start mixed GCs")) {
1108 collector_state()->set_gcs_are_young(false);
1109 } else {
1110 // We decided not to start mixed GCs after all; abort the time-to-mixed tracking.
1111 abort_time_to_mixed_tracking();
1112 }
1113
1114 collector_state()->set_last_young_gc(false);
1115 }
1116
1117 if (!collector_state()->last_gc_was_young()) {
1118 // This is a mixed GC. Here we decide whether to continue doing
1119 // mixed GCs or not.
1120 if (!next_gc_should_be_mixed("continue mixed GCs",
1121 "do not continue mixed GCs")) {
1122 collector_state()->set_gcs_are_young(true);
1123
1124 maybe_start_marking();
1125 }
1126 }
1127
1128 _short_lived_surv_rate_group->start_adding_regions();
1129 // Do that for any other surv rate groups
1130
1131 if (update_stats) {
1132 double cost_per_card_ms = 0.0;
1133 double cost_scan_hcc = average_time_ms(G1GCPhaseTimes::ScanHCC);
1134 if (_pending_cards > 0) {
1135 cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - cost_scan_hcc) / (double) _pending_cards;
1136 _cost_per_card_ms_seq->add(cost_per_card_ms);
1137 }
1138 _cost_scan_hcc_seq->add(cost_scan_hcc);
1139
1191
1192 if (young_cset_region_length() > 0) {
1193 _young_other_cost_per_region_ms_seq->add(young_other_time_ms() /
1194 young_cset_region_length());
1195 }
1196
1197 if (old_cset_region_length() > 0) {
1198 _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms() /
1199 old_cset_region_length());
1200 }
1201
1202 _constant_other_time_ms_seq->add(constant_other_time_ms(pause_time_ms));
1203
1204 _pending_cards_seq->add((double) _pending_cards);
1205 _rs_lengths_seq->add((double) _max_rs_lengths);
1206 }
1207
1208 collector_state()->set_in_marking_window(new_in_marking_window);
1209 collector_state()->set_in_marking_window_im(new_in_marking_window_im);
1210 _free_regions_at_end_of_collection = _g1->num_free_regions();
1211 // IHOP control wants to know the expected young gen length if it were not
1212 // restrained by the heap reserve. Using the actual length would make the
1213 // prediction too small and would then limit the young gen every time we
1214 // get to the predicted target occupancy.
1215 size_t last_unrestrained_young_length = 0;
1216 update_young_list_max_and_target_length(&last_unrestrained_young_length);
1217 update_rs_lengths_prediction();
1218
1219 double marking_to_mixed_time = -1.0;
1220 if (!collector_state()->last_gc_was_young() && _initial_mark_to_mixed.has_result()) {
1221 marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time();
1222 assert(marking_to_mixed_time > 0.0,
1223 "Initial mark to mixed time must be larger than zero but is %.3f",
1224 marking_to_mixed_time);
1225 }
1226 // Only update IHOP information on regular GCs.
1227 if (update_stats) {
1228 update_ihop_statistics(marking_to_mixed_time,
1229 app_time_ms / 1000.0,
1230 _last_old_allocated_bytes,
1231 last_unrestrained_young_length * HeapRegion::GrainBytes);
1232 }
1233 _last_old_allocated_bytes = 0;
1234
1235 // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
1236 double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
1237
1238 double scan_hcc_time_ms = average_time_ms(G1GCPhaseTimes::ScanHCC);
1239
1240 if (update_rs_time_goal_ms < scan_hcc_time_ms) {
1241 ergo_verbose2(ErgoTiming,
1242 "adjust concurrent refinement thresholds",
1243 ergo_format_reason("Scanning the HCC expected to take longer than Update RS time goal")
1244 ergo_format_ms("Update RS time goal")
1245 ergo_format_ms("Scan HCC time"),
1246 update_rs_time_goal_ms,
1247 scan_hcc_time_ms);
1248
1249 update_rs_time_goal_ms = 0;
1250 } else {
1251 update_rs_time_goal_ms -= scan_hcc_time_ms;
1252 }
1253 adjust_concurrent_refinement(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
1254 phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
1255 update_rs_time_goal_ms);
1256
1257 _collectionSetChooser->verify();
1258 }
1259
1260 G1IHOPControl* G1CollectorPolicy::create_ihop_control() const {
1261 return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent,
1262 G1CollectedHeap::heap()->max_capacity());
1263 }
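// Note: with this static control the threshold returned by
// get_conc_mark_start_threshold() is presumably a fixed fraction of the
// maximum capacity passed above, InitiatingHeapOccupancyPercent / 100.0 *
// max_capacity(); unlike an adaptive control it would never react to the
// allocation info and marking times fed to it in update_ihop_statistics().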
1264
1265 void G1CollectorPolicy::update_ihop_statistics(double marking_time,
1266 double mutator_time_s,
1267 size_t mutator_alloc_bytes,
1268 size_t young_gen_size) {
1269 bool report = false;
1270
1271 // To avoid using really small times that may be caused by, e.g., back-to-back
1272 // GCs, we filter them out.
1273 double const min_valid_time = 1e-6;
1274
1275 if (marking_time > min_valid_time) {
1276 _ihop_control->update_time_to_mixed(marking_time);
1277 report = true;
1278 }
1279
1280 // As an approximation for the young gc promotion rates during marking we use
1281 // all young gcs, not just those during marking: in many applications there are
1282 // only a few (if any) young gcs during marking, which would make any
1283 // prediction based on them alone useless. Using all of them increases accuracy.
1284 if (collector_state()->last_gc_was_young() && mutator_time_s > min_valid_time) {
1285 _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size);
1286 report = true;
1287 }
1288
1289 if (report) {
1290 report_ihop_statistics();
1291 }
1292 }
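// Worked example (illustrative): after a young GC with mutator_time_s = 2.0
// and mutator_alloc_bytes = 128M, the control observes an old-gen allocation
// rate of 64M/s; together with an observed marking_time of, say, 1.5 s an
// adaptive control could derive how early marking needs to start to finish
// before the target occupancy is reached.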
1293
1294 void G1CollectorPolicy::report_ihop_statistics() {
1295 _ihop_control->print();
1296 }
1297
1298 #define EXT_SIZE_FORMAT "%.1f%s"
1299 #define EXT_SIZE_PARAMS(bytes) \
1300 byte_size_in_proper_unit((double)(bytes)), \
1301 proper_unit_for_byte_size((bytes))
1302
1303 void G1CollectorPolicy::record_heap_size_info_at_start(bool full) {
1304 YoungList* young_list = _g1->young_list();
1305 _eden_used_bytes_before_gc = young_list->eden_used_bytes();
1306 _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
1307 _heap_capacity_bytes_before_gc = _g1->capacity();
1308 _heap_used_bytes_before_gc = _g1->used();
1309 _cur_collection_pause_used_regions_at_start = _g1->num_used_regions();
1310
1311 _eden_capacity_bytes_before_gc =
1312 (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
1313
1314 if (full) {
1315 _metaspace_used_bytes_before_gc = MetaspaceAux::used_bytes();
1316 }
1317 }
1790
1791 public:
1792 ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size, uint n_workers) :
1793 AbstractGangTask("ParKnownGarbageTask"),
1794 _hrSorted(hrSorted), _chunk_size(chunk_size),
1795 _g1(G1CollectedHeap::heap()), _hrclaimer(n_workers) {}
1796
1797 void work(uint worker_id) {
1798 ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);
1799 _g1->heap_region_par_iterate(&parKnownGarbageCl, worker_id, &_hrclaimer);
1800 }
1801 };
1802
1803 uint G1CollectorPolicy::calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const {
1804 assert(n_workers > 0, "Active gc workers should be greater than 0");
1805 const uint overpartition_factor = 4;
1806 const uint min_chunk_size = MAX2(n_regions / n_workers, 1U);
1807 return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size);
1808 }
1809
1810 void G1CollectorPolicy::record_concurrent_mark_cleanup_end() {
1811 _collectionSetChooser->clear();
1812
1813 WorkGang* workers = _g1->workers();
1814 uint n_workers = workers->active_workers();
1815
1816 uint n_regions = _g1->num_regions();
1817 uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions);
1818 _collectionSetChooser->prepare_for_par_region_addition(n_workers, n_regions, chunk_size);
1819 ParKnownGarbageTask par_known_garbage_task(_collectionSetChooser, chunk_size, n_workers);
1820 workers->run_task(&par_known_garbage_task);
1821
1822 _collectionSetChooser->sort_regions();
1823
1824 double end_sec = os::elapsedTime();
1825 double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
1826 _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
1827 _cur_mark_stop_world_time_ms += elapsed_time_ms;
1828 _prev_collection_pause_end_ms += elapsed_time_ms;
1829
1830 record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
1831 }
1832
1833 // Add the heap region at the head of the non-incremental collection set
1834 void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
1835 assert(_inc_cset_build_state == Active, "Precondition");
1836 assert(hr->is_old(), "the region should be old");
1837
1838 assert(!hr->in_collection_set(), "should not already be in the CSet");
1839 _g1->register_old_region_with_cset(hr);
1840 hr->set_next_in_collection_set(_collection_set);
1841 _collection_set = hr;
1842 _collection_set_bytes_used_before += hr->used();
1843 size_t rs_length = hr->rem_set()->occupied();
1844 _recorded_rs_lengths += rs_length;
1845 _old_cset_region_length += 1;
1846 }
1847
1848 // Initialize the per-collection-set information
1849 void G1CollectorPolicy::start_incremental_cset_building() {
1850 assert(_inc_cset_build_state == Inactive, "Precondition");
2024 csr = next;
2025 }
2026 }
2027 #endif // !PRODUCT
2028
2029 double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) const {
2030 // Returns the given amount of reclaimable bytes (that represents
2031 // the amount of reclaimable space still to be collected) as a
2032 // percentage of the current heap capacity.
2033 size_t capacity_bytes = _g1->capacity();
2034 return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
2035 }
2036
2037 void G1CollectorPolicy::maybe_start_marking() {
2038 if (need_to_start_conc_mark("end of GC")) {
2039 // Note: this might have already been set, if during the last
2040 // pause we decided to start a cycle but at the beginning of
2041 // this pause we decided to postpone it. That's OK.
2042 collector_state()->set_initiate_conc_mark_if_possible(true);
2043 }
2044 }
2045
2046 G1CollectorPolicy::PauseKind G1CollectorPolicy::young_gc_pause_kind() const {
2047 assert(!collector_state()->full_collection(), "must be");
2048 if (collector_state()->during_initial_mark_pause()) {
2049 assert(collector_state()->last_gc_was_young(), "must be");
2050 assert(!collector_state()->last_young_gc(), "must be");
2051 return InitialMarkGC;
2052 } else if (collector_state()->last_young_gc()) {
2053 assert(!collector_state()->during_initial_mark_pause(), "must be");
2054 assert(collector_state()->last_gc_was_young(), "must be");
2055 return LastYoungGC;
2056 } else if (!collector_state()->last_gc_was_young()) {
2057 assert(!collector_state()->during_initial_mark_pause(), "must be");
2058 assert(!collector_state()->last_young_gc(), "must be");
2059 return MixedGC;
2060 } else {
2061 assert(collector_state()->last_gc_was_young(), "must be");
2062 assert(!collector_state()->during_initial_mark_pause(), "must be");
2063 assert(!collector_state()->last_young_gc(), "must be");
2064 return YoungOnlyGC;
2065 }
2066 }
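// The mapping above, summarized (derived from the asserts):
//
//   during_initial_mark_pause  last_young_gc  last_gc_was_young  -> kind
//   true                       false          true               -> InitialMarkGC
//   false                      true           true               -> LastYoungGC
//   false                      false          false              -> MixedGC
//   false                      false          true               -> YoungOnlyGC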
2067
2068 void G1CollectorPolicy::record_pause(PauseKind kind, double start, double end) {
2069 // Manage the MMU tracker. For some reason it ignores Full GCs.
2070 if (kind != FullGC) {
2071 _mmu_tracker->add_pause(start, end);
2072 }
2073 // Manage the mutator time tracking from initial mark to first mixed gc.
2074 switch (kind) {
2075 case FullGC:
2076 abort_time_to_mixed_tracking();
2077 break;
2078 case Cleanup:
2079 case Remark:
2080 case YoungOnlyGC:
2081 case LastYoungGC:
2082 _initial_mark_to_mixed.add_pause(end - start);
2083 break;
2084 case InitialMarkGC:
2085 _initial_mark_to_mixed.record_initial_mark_end(end);
2086 break;
2087 case MixedGC:
2088 _initial_mark_to_mixed.record_mixed_gc_start(start);
2089 break;
2090 default:
2091 ShouldNotReachHere();
2092 }
2093 }
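// Illustrative timeline (assuming the tracker behind _initial_mark_to_mixed
// subtracts recorded pauses from the initial-mark-to-mixed interval):
// initial mark ends at t = 10.0 s, Remark runs 10.5-10.6 s, Cleanup
// 11.0-11.05 s and the first mixed GC starts at t = 12.0 s, giving a
// mutator-only time of (12.0 - 10.0) - 0.1 - 0.05 = 1.85 s.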
2094
2095 void G1CollectorPolicy::abort_time_to_mixed_tracking() {
2096 _initial_mark_to_mixed.reset();
2097 }
2098
2099 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
2100 const char* false_action_str) const {
2101 CollectionSetChooser* cset_chooser = _collectionSetChooser;
2102 if (cset_chooser->is_empty()) {
2103 ergo_verbose0(ErgoMixedGCs,
2104 false_action_str,
2105 ergo_format_reason("candidate old regions not available"));
2106 return false;
2107 }
2108
2109 // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
2110 size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
2111 double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
2112 double threshold = (double) G1HeapWastePercent;
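// Worked example (assuming the default G1HeapWastePercent of 5): on a 10G
// heap, mixed GCs continue only while the candidate old regions still hold
// more than 5% of capacity, i.e. more than 512M of reclaimable space.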
2113 if (reclaimable_perc <= threshold) {
2114 ergo_verbose4(ErgoMixedGCs,
2115 false_action_str,
2116 ergo_format_reason("reclaimable percentage not over threshold")
|