95 _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
96 _prev_collection_pause_end_ms(0.0),
97 _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
98 _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
99 _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
100 _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
101 _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
102 _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
103 _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
104 _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
105 _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
106 _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
107 _non_young_other_cost_per_region_ms_seq(
108 new TruncatedSeq(TruncatedSeqLength)),
109
110 _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
111 _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
112
113 _pause_time_target_ms((double) MaxGCPauseMillis),
114
115 _gcs_are_young(true),
116
117 _during_marking(false),
118 _in_marking_window(false),
119 _in_marking_window_im(false),
120
121 _recent_prev_end_times_for_all_gcs_sec(
122 new TruncatedSeq(NumPrevPausesForHeuristics)),
123
124 _recent_avg_pause_time_ratio(0.0),
125
126 _initiate_conc_mark_if_possible(false),
127 _during_initial_mark_pause(false),
128 _last_young_gc(false),
129 _last_gc_was_young(false),
130
131 _eden_used_bytes_before_gc(0),
132 _survivor_used_bytes_before_gc(0),
133 _heap_used_bytes_before_gc(0),
134 _metaspace_used_bytes_before_gc(0),
135 _eden_capacity_bytes_before_gc(0),
136 _heap_capacity_bytes_before_gc(0),
137
138 _eden_cset_region_length(0),
139 _survivor_cset_region_length(0),
140 _old_cset_region_length(0),
141
142 _collection_set(NULL),
143 _collection_set_bytes_used_before(0),
144
145 // Incremental CSet attributes
146 _inc_cset_build_state(Inactive),
147 _inc_cset_head(NULL),
148 _inc_cset_tail(NULL),
149 _inc_cset_bytes_used_before(0),
150 _inc_cset_max_finger(NULL),
550 uint desired_min_length =
551 calculate_young_list_desired_min_length(base_min_length);
552 if (desired_min_length < absolute_min_length) {
553 desired_min_length = absolute_min_length;
554 }
555
556 // Calculate the absolute and desired max bounds.
557
558 // We will try our best not to "eat" into the reserve.
559 uint absolute_max_length = 0;
560 if (_free_regions_at_end_of_collection > _reserve_regions) {
561 absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
562 }
563 uint desired_max_length = calculate_young_list_desired_max_length();
564 if (desired_max_length > absolute_max_length) {
565 desired_max_length = absolute_max_length;
566 }
567
568 uint young_list_target_length = 0;
569 if (adaptive_young_list_length()) {
570 if (gcs_are_young()) {
571 young_list_target_length =
572 calculate_young_list_target_length(rs_lengths,
573 base_min_length,
574 desired_min_length,
575 desired_max_length);
576 _rs_lengths_prediction = rs_lengths;
577 } else {
578 // Don't calculate anything and let the code below bound it to
579 // the desired_min_length, i.e., do the next GC as soon as
580 // possible to maximize how many old regions we can add to it.
581 }
582 } else {
583 // The user asked for a fixed young gen so we'll fix the young gen
584 // whether the next GC is young or mixed.
585 young_list_target_length = _young_list_fixed_length;
586 }
587
588 // Make sure we don't go over the desired max length, nor under the
589 // desired min length. In case they clash, desired_min_length wins
590 // which is why that test is second.
592 young_list_target_length = desired_max_length;
593 }
594 if (young_list_target_length < desired_min_length) {
595 young_list_target_length = desired_min_length;
596 }
597
598 assert(young_list_target_length > recorded_survivor_regions(),
599 "we should be able to allocate at least one eden region");
600 assert(young_list_target_length >= absolute_min_length, "post-condition");
601 _young_list_target_length = young_list_target_length;
602
603 update_max_gc_locker_expansion();
604 }
605
606 uint
607 G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
608 uint base_min_length,
609 uint desired_min_length,
610 uint desired_max_length) {
611 assert(adaptive_young_list_length(), "pre-condition");
612 assert(gcs_are_young(), "only call this for young GCs");
613
614 // In case some edge-condition makes the desired max length too small...
615 if (desired_max_length <= desired_min_length) {
616 return desired_min_length;
617 }
618
619 // We'll adjust min_young_length and max_young_length not to include
620 // the already allocated young regions (i.e., so they reflect the
621 // min and max eden regions we'll allocate). The base_min_length
622 // will be reflected in the predictions by the
623 // survivor_regions_evac_time prediction.
624 assert(desired_min_length > base_min_length, "invariant");
625 uint min_young_length = desired_min_length - base_min_length;
626 assert(desired_max_length > base_min_length, "invariant");
627 uint max_young_length = desired_max_length - base_min_length;
628
629 double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
630 double survivor_regions_evac_time = predict_survivor_regions_evac_time();
631 size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
632 size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
695 base_free_regions, target_pause_time_ms),
696 "min_young_length, the result of the binary search, should "
697 "fit into the pause target");
698 assert(!predict_will_fit(min_young_length + 1, base_time_ms,
699 base_free_regions, target_pause_time_ms),
700 "min_young_length, the result of the binary search, should be "
701 "optimal, so no larger length should fit into the pause target");
702 }
703 } else {
704 // Even the minimum length doesn't fit into the pause time
705 // target, return it as the result nevertheless.
706 }
707 return base_min_length + min_young_length;
708 }
709
710 double G1CollectorPolicy::predict_survivor_regions_evac_time() {
711 double survivor_regions_evac_time = 0.0;
712 for (HeapRegion * r = _recorded_survivor_head;
713 r != NULL && r != _recorded_survivor_tail->get_next_young_region();
714 r = r->get_next_young_region()) {
715 survivor_regions_evac_time += predict_region_elapsed_time_ms(r, gcs_are_young());
716 }
717 return survivor_regions_evac_time;
718 }
719
720 void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
721 guarantee( adaptive_young_list_length(), "should not call this otherwise" );
722
723 size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
724 if (rs_lengths > _rs_lengths_prediction) {
725 // add 10% to avoid having to recalculate often
726 size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
727 update_young_list_target_length(rs_lengths_prediction);
728 }
729 }
730
731
732
733 HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
734 bool is_tlab,
735 bool* gc_overhead_limit_was_exceeded) {
798 record_heap_size_info_at_start(true /* full */);
799 // Release the future to-space so that it is available for compaction into.
800 _g1->set_full_collection();
801 }
802
803 void G1CollectorPolicy::record_full_collection_end() {
804 // Consider this like a collection pause for the purposes of allocation
805 // since last pause.
806 double end_sec = os::elapsedTime();
807 double full_gc_time_sec = end_sec - _full_collection_start_sec;
808 double full_gc_time_ms = full_gc_time_sec * 1000.0;
809
810 _trace_old_gen_time_data.record_full_collection(full_gc_time_ms);
811
812 update_recent_gc_times(end_sec, full_gc_time_ms);
813
814 _g1->clear_full_collection();
815
816 // "Nuke" the heuristics that control the young/mixed GC
817 // transitions and make sure we start with young GCs after the Full GC.
818 set_gcs_are_young(true);
819 _last_young_gc = false;
820 clear_initiate_conc_mark_if_possible();
821 clear_during_initial_mark_pause();
822 _in_marking_window = false;
823 _in_marking_window_im = false;
824
825 _short_lived_surv_rate_group->start_adding_regions();
826 // also call this on any additional surv rate groups
827
828 record_survivor_regions(0, NULL, NULL);
829
830 _free_regions_at_end_of_collection = _g1->num_free_regions();
831 // Reset survivors SurvRateGroup.
832 _survivor_surv_rate_group->reset();
833 update_young_list_target_length();
834 _collectionSetChooser->clear();
835 }
836
void G1CollectorPolicy::record_stop_world_start() {
  // Remember when the stop-the-world request began; later code uses
  // this to report how long reaching the safepoint / yielding took.
  _stop_world_start = os::elapsedTime();
}
840
void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
  // Per-pause setup: refresh the survivor sizing policy, record timing
  // and heap-size info, and reset the counters that the end-of-pause
  // accounting reads.
  //
  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start. so, no point is calculating this
  // every time we calculate / recalculate the target young length.
  update_survivors_policy();

  assert(_g1->used() == _g1->recalculate_used(),
         err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
                 _g1->used(), _g1->recalculate_used()));

  // Report how long the world-stop took, then clear the start marker.
  double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
  _trace_young_gen_time_data.record_start_collection(s_w_t_ms);
  _stop_world_start = 0.0;

  record_heap_size_info_at_start(false /* full */);

  phase_times()->record_cur_collection_start_sec(start_time_sec);
  _pending_cards = _g1->pending_card_num();

  // Reset the per-pause accumulators.
  _collection_set_bytes_used_before = 0;
  _bytes_copied_during_gc = 0;

  _last_gc_was_young = false;

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert( verify_young_ages(), "region age verification" );
}
871
void G1CollectorPolicy::record_concurrent_mark_init_end(double
                                                   mark_init_elapsed_time_ms) {
  // The initial-mark pause has completed: marking is now in progress.
  _during_marking = true;
  assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
  clear_during_initial_mark_pause();
  // Start accumulating the marking-related stop-the-world time with the
  // duration of the initial-mark pause itself.
  _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
}
879
void G1CollectorPolicy::record_concurrent_mark_remark_start() {
  // Remark is a stop-the-world phase; remember when it began.
  _mark_remark_start_sec = os::elapsedTime();
  // Concurrent marking proper is over once remark starts.
  _during_marking = false;
}
884
885 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
886 double end_time_sec = os::elapsedTime();
887 double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
888 _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
889 _cur_mark_stop_world_time_ms += elapsed_time_ms;
890 _prev_collection_pause_end_ms += elapsed_time_ms;
891
892 _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
893 }
894
void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
  // Cleanup is a stop-the-world phase; remember when it began.
  _mark_cleanup_start_sec = os::elapsedTime();
}
898
void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
  // Marking has finished: the next collection is treated as the "last
  // young GC" before mixed GCs are considered, and the marking window
  // is closed.
  _last_young_gc = true;
  _in_marking_window = false;
}
903
904 void G1CollectorPolicy::record_concurrent_pause() {
905 if (_stop_world_start > 0.0) {
906 double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
907 _trace_young_gen_time_data.record_yield_time(yield_ms);
908 }
909 }
910
911 bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
912 if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
913 return false;
914 }
915
916 size_t marking_initiating_used_threshold =
917 (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
918 size_t cur_used_bytes = _g1->non_young_capacity_bytes();
919 size_t alloc_byte_size = alloc_word_size * HeapWordSize;
920
921 if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
922 if (gcs_are_young() && !_last_young_gc) {
923 ergo_verbose5(ErgoConcCycles,
924 "request concurrent cycle initiation",
925 ergo_format_reason("occupancy higher than threshold")
926 ergo_format_byte("occupancy")
927 ergo_format_byte("allocation request")
928 ergo_format_byte_perc("threshold")
929 ergo_format_str("source"),
930 cur_used_bytes,
931 alloc_byte_size,
932 marking_initiating_used_threshold,
933 (double) InitiatingHeapOccupancyPercent,
934 source);
935 return true;
936 } else {
937 ergo_verbose5(ErgoConcCycles,
938 "do not request concurrent cycle initiation",
939 ergo_format_reason("still doing mixed collections")
940 ergo_format_byte("occupancy")
941 ergo_format_byte("allocation request")
942 ergo_format_byte_perc("threshold")
957
958 void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info) {
959 double end_time_sec = os::elapsedTime();
960 assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
961 "otherwise, the subtraction below does not make sense");
962 size_t rs_size =
963 _cur_collection_pause_used_regions_at_start - cset_region_length();
964 size_t cur_used_bytes = _g1->used();
965 assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
966 bool last_pause_included_initial_mark = false;
967 bool update_stats = !_g1->evacuation_failed();
968
969 #ifndef PRODUCT
970 if (G1YoungSurvRateVerbose) {
971 gclog_or_tty->cr();
972 _short_lived_surv_rate_group->print();
973 // do that for any other surv rate groups too
974 }
975 #endif // PRODUCT
976
977 last_pause_included_initial_mark = during_initial_mark_pause();
978 if (last_pause_included_initial_mark) {
979 record_concurrent_mark_init_end(0.0);
980 } else if (need_to_start_conc_mark("end of GC")) {
981 // Note: this might have already been set, if during the last
982 // pause we decided to start a cycle but at the beginning of
983 // this pause we decided to postpone it. That's OK.
984 set_initiate_conc_mark_if_possible();
985 }
986
987 _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
988 end_time_sec, false);
989
990 evacuation_info.set_collectionset_used_before(_collection_set_bytes_used_before);
991 evacuation_info.set_bytes_copied(_bytes_copied_during_gc);
992
993 if (update_stats) {
994 _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
995 // this is where we update the allocation rate of the application
996 double app_time_ms =
997 (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
998 if (app_time_ms < MIN_TIMER_GRANULARITY) {
999 // This usually happens due to the timer not having the required
1000 // granularity. Some Linuxes are the usual culprits.
1001 // We'll just set it to something (arbitrarily) small.
1002 app_time_ms = 1.0;
1003 }
1004 // We maintain the invariant that all objects allocated by mutator
1026 gclog_or_tty->print_cr("Recent GC Times (ms):");
1027 _recent_gc_times_ms->dump();
1028 gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
1029 _recent_prev_end_times_for_all_gcs_sec->dump();
1030 gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
1031 _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
1032 // In debug mode, terminate the JVM if the user wants to debug at this point.
1033 assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
1034 #endif // !PRODUCT
1035 // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
1036 // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
1037 if (_recent_avg_pause_time_ratio < 0.0) {
1038 _recent_avg_pause_time_ratio = 0.0;
1039 } else {
1040 assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
1041 _recent_avg_pause_time_ratio = 1.0;
1042 }
1043 }
1044 }
1045
1046 bool new_in_marking_window = _in_marking_window;
1047 bool new_in_marking_window_im = false;
1048 if (last_pause_included_initial_mark) {
1049 new_in_marking_window = true;
1050 new_in_marking_window_im = true;
1051 }
1052
1053 if (_last_young_gc) {
1054 // This is supposed to to be the "last young GC" before we start
1055 // doing mixed GCs. Here we decide whether to start mixed GCs or not.
1056
1057 if (!last_pause_included_initial_mark) {
1058 if (next_gc_should_be_mixed("start mixed GCs",
1059 "do not start mixed GCs")) {
1060 set_gcs_are_young(false);
1061 }
1062 } else {
1063 ergo_verbose0(ErgoMixedGCs,
1064 "do not start mixed GCs",
1065 ergo_format_reason("concurrent cycle is about to start"));
1066 }
1067 _last_young_gc = false;
1068 }
1069
1070 if (!_last_gc_was_young) {
1071 // This is a mixed GC. Here we decide whether to continue doing
1072 // mixed GCs or not.
1073
1074 if (!next_gc_should_be_mixed("continue mixed GCs",
1075 "do not continue mixed GCs")) {
1076 set_gcs_are_young(true);
1077 }
1078 }
1079
1080 _short_lived_surv_rate_group->start_adding_regions();
1081 // Do that for any other surv rate groups
1082
1083 if (update_stats) {
1084 double cost_per_card_ms = 0.0;
1085 if (_pending_cards > 0) {
1086 cost_per_card_ms = phase_times()->average_last_update_rs_time() / (double) _pending_cards;
1087 _cost_per_card_ms_seq->add(cost_per_card_ms);
1088 }
1089
1090 size_t cards_scanned = _g1->cards_scanned();
1091
1092 double cost_per_entry_ms = 0.0;
1093 if (cards_scanned > 10) {
1094 cost_per_entry_ms = phase_times()->average_last_scan_rs_time() / (double) cards_scanned;
1095 if (_last_gc_was_young) {
1096 _cost_per_entry_ms_seq->add(cost_per_entry_ms);
1097 } else {
1098 _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
1099 }
1100 }
1101
1102 if (_max_rs_lengths > 0) {
1103 double cards_per_entry_ratio =
1104 (double) cards_scanned / (double) _max_rs_lengths;
1105 if (_last_gc_was_young) {
1106 _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
1107 } else {
1108 _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
1109 }
1110 }
1111
1112 // This is defensive. For a while _max_rs_lengths could get
1113 // smaller than _recorded_rs_lengths which was causing
1114 // rs_length_diff to get very large and mess up the RSet length
1115 // predictions. The reason was unsafe concurrent updates to the
1116 // _inc_cset_recorded_rs_lengths field which the code below guards
1117 // against (see CR 7118202). This bug has now been fixed (see CR
1118 // 7119027). However, I'm still worried that
1119 // _inc_cset_recorded_rs_lengths might still end up somewhat
1120 // inaccurate. The concurrent refinement thread calculates an
1121 // RSet's length concurrently with other CR threads updating it
1122 // which might cause it to calculate the length incorrectly (if,
1123 // say, it's in mid-coarsening). So I'll leave in the defensive
1124 // conditional below just in case.
1125 size_t rs_length_diff = 0;
1126 if (_max_rs_lengths > _recorded_rs_lengths) {
1127 rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
1128 }
1129 _rs_length_diff_seq->add((double) rs_length_diff);
1130
1131 size_t freed_bytes = _heap_used_bytes_before_gc - cur_used_bytes;
1132 size_t copied_bytes = _collection_set_bytes_used_before - freed_bytes;
1133 double cost_per_byte_ms = 0.0;
1134
1135 if (copied_bytes > 0) {
1136 cost_per_byte_ms = phase_times()->average_last_obj_copy_time() / (double) copied_bytes;
1137 if (_in_marking_window) {
1138 _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
1139 } else {
1140 _cost_per_byte_ms_seq->add(cost_per_byte_ms);
1141 }
1142 }
1143
1144 double all_other_time_ms = pause_time_ms -
1145 (phase_times()->average_last_update_rs_time() + phase_times()->average_last_scan_rs_time()
1146 + phase_times()->average_last_obj_copy_time() + phase_times()->average_last_termination_time());
1147
1148 double young_other_time_ms = 0.0;
1149 if (young_cset_region_length() > 0) {
1150 young_other_time_ms =
1151 phase_times()->young_cset_choice_time_ms() +
1152 phase_times()->young_free_cset_time_ms();
1153 _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
1154 (double) young_cset_region_length());
1155 }
1156 double non_young_other_time_ms = 0.0;
1157 if (old_cset_region_length() > 0) {
1160 phase_times()->non_young_free_cset_time_ms();
1161
1162 _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
1163 (double) old_cset_region_length());
1164 }
1165
1166 double constant_other_time_ms = all_other_time_ms -
1167 (young_other_time_ms + non_young_other_time_ms);
1168 _constant_other_time_ms_seq->add(constant_other_time_ms);
1169
1170 double survival_ratio = 0.0;
1171 if (_collection_set_bytes_used_before > 0) {
1172 survival_ratio = (double) _bytes_copied_during_gc /
1173 (double) _collection_set_bytes_used_before;
1174 }
1175
1176 _pending_cards_seq->add((double) _pending_cards);
1177 _rs_lengths_seq->add((double) _max_rs_lengths);
1178 }
1179
1180 _in_marking_window = new_in_marking_window;
1181 _in_marking_window_im = new_in_marking_window_im;
1182 _free_regions_at_end_of_collection = _g1->num_free_regions();
1183 update_young_list_target_length();
1184
1185 // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
1186 double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
1187 adjust_concurrent_refinement(phase_times()->average_last_update_rs_time(),
1188 phase_times()->sum_last_update_rs_processed_buffers(), update_rs_time_goal_ms);
1189
1190 _collectionSetChooser->verify();
1191 }
1192
1193 #define EXT_SIZE_FORMAT "%.1f%s"
1194 #define EXT_SIZE_PARAMS(bytes) \
1195 byte_size_in_proper_unit((double)(bytes)), \
1196 proper_unit_for_byte_size((bytes))
1197
1198 void G1CollectorPolicy::record_heap_size_info_at_start(bool full) {
1199 YoungList* young_list = _g1->young_list();
1200 _eden_used_bytes_before_gc = young_list->eden_used_bytes();
1201 _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
1289 dcqs.set_completed_queue_padding(curr_queue_size);
1290 } else {
1291 dcqs.set_completed_queue_padding(0);
1292 }
1293 dcqs.notify_if_necessary();
1294 }
1295
1296 double
1297 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
1298 size_t scanned_cards) {
1299 return
1300 predict_rs_update_time_ms(pending_cards) +
1301 predict_rs_scan_time_ms(scanned_cards) +
1302 predict_constant_other_time_ms();
1303 }
1304
1305 double
1306 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
1307 size_t rs_length = predict_rs_length_diff();
1308 size_t card_num;
1309 if (gcs_are_young()) {
1310 card_num = predict_young_card_num(rs_length);
1311 } else {
1312 card_num = predict_non_young_card_num(rs_length);
1313 }
1314 return predict_base_elapsed_time_ms(pending_cards, card_num);
1315 }
1316
1317 size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
1318 size_t bytes_to_copy;
1319 if (hr->is_marked())
1320 bytes_to_copy = hr->max_live_bytes();
1321 else {
1322 assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
1323 int age = hr->age_in_surv_rate_group();
1324 double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
1325 bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
1326 }
1327 return bytes_to_copy;
1328 }
1329
1455 void G1CollectorPolicy::update_survivors_policy() {
1456 double max_survivor_regions_d =
1457 (double) _young_list_target_length / (double) SurvivorRatio;
1458 // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
1459 // smaller than 1.0) we'll get 1.
1460 _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
1461
1462 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
1463 HeapRegion::GrainWords * _max_survivor_regions);
1464 }
1465
1466 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
1467 GCCause::Cause gc_cause) {
1468 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
1469 if (!during_cycle) {
1470 ergo_verbose1(ErgoConcCycles,
1471 "request concurrent cycle initiation",
1472 ergo_format_reason("requested by GC cause")
1473 ergo_format_str("GC cause"),
1474 GCCause::to_string(gc_cause));
1475 set_initiate_conc_mark_if_possible();
1476 return true;
1477 } else {
1478 ergo_verbose1(ErgoConcCycles,
1479 "do not request concurrent cycle initiation",
1480 ergo_format_reason("concurrent cycle already in progress")
1481 ergo_format_str("GC cause"),
1482 GCCause::to_string(gc_cause));
1483 return false;
1484 }
1485 }
1486
1487 void
1488 G1CollectorPolicy::decide_on_conc_mark_initiation() {
1489 // We are about to decide on whether this pause will be an
1490 // initial-mark pause.
1491
1492 // First, during_initial_mark_pause() should not be already set. We
1493 // will set it here if we have to. However, it should be cleared by
1494 // the end of the pause (it's only set for the duration of an
1495 // initial-mark pause).
1496 assert(!during_initial_mark_pause(), "pre-condition");
1497
1498 if (initiate_conc_mark_if_possible()) {
1499 // We had noticed on a previous pause that the heap occupancy has
1500 // gone over the initiating threshold and we should start a
1501 // concurrent marking cycle. So we might initiate one.
1502
1503 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
1504 if (!during_cycle) {
1505 // The concurrent marking thread is not "during a cycle", i.e.,
1506 // it has completed the last one. So we can go ahead and
1507 // initiate a new cycle.
1508
1509 set_during_initial_mark_pause();
1510 // We do not allow mixed GCs during marking.
1511 if (!gcs_are_young()) {
1512 set_gcs_are_young(true);
1513 ergo_verbose0(ErgoMixedGCs,
1514 "end mixed GCs",
1515 ergo_format_reason("concurrent cycle is about to start"));
1516 }
1517
1518 // And we can now clear initiate_conc_mark_if_possible() as
1519 // we've already acted on it.
1520 clear_initiate_conc_mark_if_possible();
1521
1522 ergo_verbose0(ErgoConcCycles,
1523 "initiate concurrent cycle",
1524 ergo_format_reason("concurrent cycle initiation requested"));
1525 } else {
1526 // The concurrent marking thread is still finishing up the
1527 // previous cycle. If we start one right now the two cycles
1528 // overlap. In particular, the concurrent marking thread might
1529 // be in the process of clearing the next marking bitmap (which
1530 // we will use for the next cycle if we start one). Starting a
1531 // cycle now will be bad given that parts of the marking
1532 // information might get cleared by the marking thread. And we
1533 // cannot wait for the marking thread to finish the cycle as it
1534 // periodically yields while clearing the next marking bitmap
1535 // and, if it's in a yield point, it's waiting for us to
1536 // finish. So, at this point we will not start a cycle and we'll
1537 // let the concurrent marking thread complete the last one.
1538 ergo_verbose0(ErgoConcCycles,
1539 "do not initiate concurrent cycle",
1540 ergo_format_reason("concurrent cycle already in progress"));
1672 _inc_cset_predicted_elapsed_time_ms +=
1673 _inc_cset_predicted_elapsed_time_ms_diffs;
1674
1675 _inc_cset_recorded_rs_lengths_diffs = 0;
1676 _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
1677 }
1678
1679 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
1680 // This routine is used when:
1681 // * adding survivor regions to the incremental cset at the end of an
1682 // evacuation pause,
1683 // * adding the current allocation region to the incremental cset
1684 // when it is retired, and
1685 // * updating existing policy information for a region in the
1686 // incremental cset via young list RSet sampling.
1687 // Therefore this routine may be called at a safepoint by the
1688 // VM thread, or in-between safepoints by mutator threads (when
1689 // retiring the current allocation region) or a concurrent
1690 // refine thread (RSet sampling).
1691
1692 double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
1693 size_t used_bytes = hr->used();
1694 _inc_cset_recorded_rs_lengths += rs_length;
1695 _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
1696 _inc_cset_bytes_used_before += used_bytes;
1697
1698 // Cache the values we have added to the aggregated information
1699 // in the heap region in case we have to remove this region from
1700 // the incremental collection set, or it is updated by the
1701 // rset sampling code
1702 hr->set_recorded_rs_length(rs_length);
1703 hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
1704 }
1705
1706 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
1707 size_t new_rs_length) {
1708 // Update the CSet information that is dependent on the new RS length
1709 assert(hr->is_young(), "Precondition");
1710 assert(!SafepointSynchronize::is_at_safepoint(),
1711 "should not be at a safepoint");
1712
1713 // We could have updated _inc_cset_recorded_rs_lengths and
1714 // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do
1715 // that atomically, as this code is executed by a concurrent
1716 // refinement thread, potentially concurrently with a mutator thread
1717 // allocating a new region and also updating the same fields. To
1718 // avoid the atomic operations we accumulate these updates on two
1719 // separate fields (*_diffs) and we'll just add them to the "main"
1720 // fields at the start of a GC.
1721
1722 ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
1723 ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
1724 _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;
1725
1726 double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
1727 double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
1728 double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
1729 _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
1730
1731 hr->set_recorded_rs_length(new_rs_length);
1732 hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
1733 }
1734
1735 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
1736 assert(hr->is_young(), "invariant");
1737 assert(hr->young_index_in_cset() > -1, "should have already been set");
1738 assert(_inc_cset_build_state == Active, "Precondition");
1739
1740 // We need to clear and set the cached recorded/cached collection set
1741 // information in the heap region here (before the region gets added
1742 // to the collection set). An individual heap region's cached values
1743 // are calculated, aggregated with the policy collection set info,
1744 // and cached in the heap region here (initially) and (subsequently)
1745 // by the Young List sampling code.
1746
1747 size_t rs_length = hr->rem_set()->occupied();
1902 YoungList* young_list = _g1->young_list();
1903 finalize_incremental_cset_building();
1904
1905 guarantee(target_pause_time_ms > 0.0,
1906 err_msg("target_pause_time_ms = %1.6lf should be positive",
1907 target_pause_time_ms));
1908 guarantee(_collection_set == NULL, "Precondition");
1909
1910 double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
1911 double predicted_pause_time_ms = base_time_ms;
1912 double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);
1913
1914 ergo_verbose4(ErgoCSetConstruction | ErgoHigh,
1915 "start choosing CSet",
1916 ergo_format_size("_pending_cards")
1917 ergo_format_ms("predicted base time")
1918 ergo_format_ms("remaining time")
1919 ergo_format_ms("target pause time"),
1920 _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
1921
1922 _last_gc_was_young = gcs_are_young() ? true : false;
1923
1924 if (_last_gc_was_young) {
1925 _trace_young_gen_time_data.increment_young_collection_count();
1926 } else {
1927 _trace_young_gen_time_data.increment_mixed_collection_count();
1928 }
1929
1930 // The young list is laid with the survivor regions from the previous
1931 // pause are appended to the RHS of the young list, i.e.
1932 // [Newly Young Regions ++ Survivors from last pause].
1933
1934 uint survivor_region_length = young_list->survivor_length();
1935 uint eden_region_length = young_list->length() - survivor_region_length;
1936 init_cset_region_lengths(eden_region_length, survivor_region_length);
1937
1938 HeapRegion* hr = young_list->first_survivor_region();
1939 while (hr != NULL) {
1940 assert(hr->is_survivor(), "badly formed young list");
1941 // There is a convention that all the young regions in the CSet
1942 // are tagged as "eden", so we do this for the survivors here. We
1943 // use the special set_eden_pre_gc() as it doesn't check that the
1944 // region is free (which is not the case here).
1955 predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;
1956
1957 ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
1958 "add young regions to CSet",
1959 ergo_format_region("eden")
1960 ergo_format_region("survivors")
1961 ergo_format_ms("predicted young region time"),
1962 eden_region_length, survivor_region_length,
1963 _inc_cset_predicted_elapsed_time_ms);
1964
1965 // The number of recorded young regions is the incremental
1966 // collection set's current size
1967 set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
1968
1969 double young_end_time_sec = os::elapsedTime();
1970 phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);
1971
1972 // Set the start of the non-young choice time.
1973 double non_young_start_time_sec = young_end_time_sec;
1974
1975 if (!gcs_are_young()) {
1976 CollectionSetChooser* cset_chooser = _collectionSetChooser;
1977 cset_chooser->verify();
1978 const uint min_old_cset_length = calc_min_old_cset_length();
1979 const uint max_old_cset_length = calc_max_old_cset_length();
1980
1981 uint expensive_region_num = 0;
1982 bool check_time_remaining = adaptive_young_list_length();
1983
1984 HeapRegion* hr = cset_chooser->peek();
1985 while (hr != NULL) {
1986 if (old_cset_region_length() >= max_old_cset_length) {
1987 // Added maximum number of old regions to the CSet.
1988 ergo_verbose2(ErgoCSetConstruction,
1989 "finish adding old regions to CSet",
1990 ergo_format_reason("old CSet region num reached max")
1991 ergo_format_region("old")
1992 ergo_format_region("max"),
1993 old_cset_region_length(), max_old_cset_length);
1994 break;
1995 }
2001 double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
2002 double threshold = (double) G1HeapWastePercent;
2003 if (reclaimable_perc <= threshold) {
2004 // We've added enough old regions that the amount of uncollected
2005 // reclaimable space is at or below the waste threshold. Stop
2006 // adding old regions to the CSet.
2007 ergo_verbose5(ErgoCSetConstruction,
2008 "finish adding old regions to CSet",
2009 ergo_format_reason("reclaimable percentage not over threshold")
2010 ergo_format_region("old")
2011 ergo_format_region("max")
2012 ergo_format_byte_perc("reclaimable")
2013 ergo_format_perc("threshold"),
2014 old_cset_region_length(),
2015 max_old_cset_length,
2016 reclaimable_bytes,
2017 reclaimable_perc, threshold);
2018 break;
2019 }
2020
2021 double predicted_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
2022 if (check_time_remaining) {
2023 if (predicted_time_ms > time_remaining_ms) {
2024 // Too expensive for the current CSet.
2025
2026 if (old_cset_region_length() >= min_old_cset_length) {
2027 // We have added the minimum number of old regions to the CSet,
2028 // we are done with this CSet.
2029 ergo_verbose4(ErgoCSetConstruction,
2030 "finish adding old regions to CSet",
2031 ergo_format_reason("predicted time is too high")
2032 ergo_format_ms("predicted time")
2033 ergo_format_ms("remaining time")
2034 ergo_format_region("old")
2035 ergo_format_region("min"),
2036 predicted_time_ms, time_remaining_ms,
2037 old_cset_region_length(), min_old_cset_length);
2038 break;
2039 }
2040
2041 // We'll add it anyway given that we haven't reached the
|
95 _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
96 _prev_collection_pause_end_ms(0.0),
97 _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
98 _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
99 _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
100 _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
101 _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
102 _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
103 _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
104 _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
105 _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
106 _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
107 _non_young_other_cost_per_region_ms_seq(
108 new TruncatedSeq(TruncatedSeqLength)),
109
110 _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
111 _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
112
113 _pause_time_target_ms((double) MaxGCPauseMillis),
114
115 _recent_prev_end_times_for_all_gcs_sec(
116 new TruncatedSeq(NumPrevPausesForHeuristics)),
117
118 _recent_avg_pause_time_ratio(0.0),
119
120 _eden_used_bytes_before_gc(0),
121 _survivor_used_bytes_before_gc(0),
122 _heap_used_bytes_before_gc(0),
123 _metaspace_used_bytes_before_gc(0),
124 _eden_capacity_bytes_before_gc(0),
125 _heap_capacity_bytes_before_gc(0),
126
127 _eden_cset_region_length(0),
128 _survivor_cset_region_length(0),
129 _old_cset_region_length(0),
130
131 _collection_set(NULL),
132 _collection_set_bytes_used_before(0),
133
134 // Incremental CSet attributes
135 _inc_cset_build_state(Inactive),
136 _inc_cset_head(NULL),
137 _inc_cset_tail(NULL),
138 _inc_cset_bytes_used_before(0),
139 _inc_cset_max_finger(NULL),
539 uint desired_min_length =
540 calculate_young_list_desired_min_length(base_min_length);
541 if (desired_min_length < absolute_min_length) {
542 desired_min_length = absolute_min_length;
543 }
544
545 // Calculate the absolute and desired max bounds.
546
547 // We will try our best not to "eat" into the reserve.
548 uint absolute_max_length = 0;
549 if (_free_regions_at_end_of_collection > _reserve_regions) {
550 absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
551 }
552 uint desired_max_length = calculate_young_list_desired_max_length();
553 if (desired_max_length > absolute_max_length) {
554 desired_max_length = absolute_max_length;
555 }
556
557 uint young_list_target_length = 0;
558 if (adaptive_young_list_length()) {
559 if (collector_state()->gcs_are_young()) {
560 young_list_target_length =
561 calculate_young_list_target_length(rs_lengths,
562 base_min_length,
563 desired_min_length,
564 desired_max_length);
565 _rs_lengths_prediction = rs_lengths;
566 } else {
567 // Don't calculate anything and let the code below bound it to
568 // the desired_min_length, i.e., do the next GC as soon as
569 // possible to maximize how many old regions we can add to it.
570 }
571 } else {
572 // The user asked for a fixed young gen so we'll fix the young gen
573 // whether the next GC is young or mixed.
574 young_list_target_length = _young_list_fixed_length;
575 }
576
577 // Make sure we don't go over the desired max length, nor under the
578 // desired min length. In case they clash, desired_min_length wins
579 // which is why that test is second.
581 young_list_target_length = desired_max_length;
582 }
583 if (young_list_target_length < desired_min_length) {
584 young_list_target_length = desired_min_length;
585 }
586
587 assert(young_list_target_length > recorded_survivor_regions(),
588 "we should be able to allocate at least one eden region");
589 assert(young_list_target_length >= absolute_min_length, "post-condition");
590 _young_list_target_length = young_list_target_length;
591
592 update_max_gc_locker_expansion();
593 }
594
595 uint
596 G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
597 uint base_min_length,
598 uint desired_min_length,
599 uint desired_max_length) {
600 assert(adaptive_young_list_length(), "pre-condition");
601 assert(collector_state()->gcs_are_young(), "only call this for young GCs");
602
603 // In case some edge-condition makes the desired max length too small...
604 if (desired_max_length <= desired_min_length) {
605 return desired_min_length;
606 }
607
608 // We'll adjust min_young_length and max_young_length not to include
609 // the already allocated young regions (i.e., so they reflect the
610 // min and max eden regions we'll allocate). The base_min_length
611 // will be reflected in the predictions by the
612 // survivor_regions_evac_time prediction.
613 assert(desired_min_length > base_min_length, "invariant");
614 uint min_young_length = desired_min_length - base_min_length;
615 assert(desired_max_length > base_min_length, "invariant");
616 uint max_young_length = desired_max_length - base_min_length;
617
618 double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
619 double survivor_regions_evac_time = predict_survivor_regions_evac_time();
620 size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
621 size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
684 base_free_regions, target_pause_time_ms),
685 "min_young_length, the result of the binary search, should "
686 "fit into the pause target");
687 assert(!predict_will_fit(min_young_length + 1, base_time_ms,
688 base_free_regions, target_pause_time_ms),
689 "min_young_length, the result of the binary search, should be "
690 "optimal, so no larger length should fit into the pause target");
691 }
692 } else {
693 // Even the minimum length doesn't fit into the pause time
694 // target, return it as the result nevertheless.
695 }
696 return base_min_length + min_young_length;
697 }
698
699 double G1CollectorPolicy::predict_survivor_regions_evac_time() {
700 double survivor_regions_evac_time = 0.0;
701 for (HeapRegion * r = _recorded_survivor_head;
702 r != NULL && r != _recorded_survivor_tail->get_next_young_region();
703 r = r->get_next_young_region()) {
704 survivor_regions_evac_time += predict_region_elapsed_time_ms(r, collector_state()->gcs_are_young());
705 }
706 return survivor_regions_evac_time;
707 }
708
709 void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
710 guarantee( adaptive_young_list_length(), "should not call this otherwise" );
711
712 size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
713 if (rs_lengths > _rs_lengths_prediction) {
714 // add 10% to avoid having to recalculate often
715 size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
716 update_young_list_target_length(rs_lengths_prediction);
717 }
718 }
719
720
721
722 HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
723 bool is_tlab,
724 bool* gc_overhead_limit_was_exceeded) {
787 record_heap_size_info_at_start(true /* full */);
788 // Release the future to-space so that it is available for compaction into.
789 _g1->set_full_collection();
790 }
791
792 void G1CollectorPolicy::record_full_collection_end() {
793 // Consider this like a collection pause for the purposes of allocation
794 // since last pause.
795 double end_sec = os::elapsedTime();
796 double full_gc_time_sec = end_sec - _full_collection_start_sec;
797 double full_gc_time_ms = full_gc_time_sec * 1000.0;
798
799 _trace_old_gen_time_data.record_full_collection(full_gc_time_ms);
800
801 update_recent_gc_times(end_sec, full_gc_time_ms);
802
803 _g1->clear_full_collection();
804
805 // "Nuke" the heuristics that control the young/mixed GC
806 // transitions and make sure we start with young GCs after the Full GC.
807 collector_state()->set_gcs_are_young(true);
808 collector_state()->set_last_young_gc(false);
809 collector_state()->set_initiate_conc_mark_if_possible(false);
810 collector_state()->set_during_initial_mark_pause(false);
811 collector_state()->set_in_marking_window(false);
812 collector_state()->set_in_marking_window_im(false);
813
814 _short_lived_surv_rate_group->start_adding_regions();
815 // also call this on any additional surv rate groups
816
817 record_survivor_regions(0, NULL, NULL);
818
819 _free_regions_at_end_of_collection = _g1->num_free_regions();
820 // Reset survivors SurvRateGroup.
821 _survivor_surv_rate_group->reset();
822 update_young_list_target_length();
823 _collectionSetChooser->clear();
824 }
825
826 void G1CollectorPolicy::record_stop_world_start() {
827 _stop_world_start = os::elapsedTime();
828 }
829
830 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
831 // We only need to do this here as the policy will only be applied
832 // to the GC we're about to start. so, no point is calculating this
833 // every time we calculate / recalculate the target young length.
834 update_survivors_policy();
835
836 assert(_g1->used() == _g1->recalculate_used(),
837 err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
838 _g1->used(), _g1->recalculate_used()));
839
840 double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
841 _trace_young_gen_time_data.record_start_collection(s_w_t_ms);
842 _stop_world_start = 0.0;
843
844 record_heap_size_info_at_start(false /* full */);
845
846 phase_times()->record_cur_collection_start_sec(start_time_sec);
847 _pending_cards = _g1->pending_card_num();
848
849 _collection_set_bytes_used_before = 0;
850 _bytes_copied_during_gc = 0;
851
852 collector_state()->set_last_gc_was_young(false);
853
854 // do that for any other surv rate groups
855 _short_lived_surv_rate_group->stop_adding_regions();
856 _survivors_age_table.clear();
857
858 assert( verify_young_ages(), "region age verification" );
859 }
860
861 void G1CollectorPolicy::record_concurrent_mark_init_end(double
862 mark_init_elapsed_time_ms) {
863 collector_state()->set_during_marking(true);
864 assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
865 collector_state()->set_during_initial_mark_pause(false);
866 _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
867 }
868
869 void G1CollectorPolicy::record_concurrent_mark_remark_start() {
870 _mark_remark_start_sec = os::elapsedTime();
871 collector_state()->set_during_marking(false);
872 }
873
874 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
875 double end_time_sec = os::elapsedTime();
876 double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
877 _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
878 _cur_mark_stop_world_time_ms += elapsed_time_ms;
879 _prev_collection_pause_end_ms += elapsed_time_ms;
880
881 _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
882 }
883
884 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
885 _mark_cleanup_start_sec = os::elapsedTime();
886 }
887
888 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
889 collector_state()->set_last_young_gc(true);
890 collector_state()->set_in_marking_window(false);
891 }
892
893 void G1CollectorPolicy::record_concurrent_pause() {
894 if (_stop_world_start > 0.0) {
895 double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
896 _trace_young_gen_time_data.record_yield_time(yield_ms);
897 }
898 }
899
900 bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
901 if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
902 return false;
903 }
904
905 size_t marking_initiating_used_threshold =
906 (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
907 size_t cur_used_bytes = _g1->non_young_capacity_bytes();
908 size_t alloc_byte_size = alloc_word_size * HeapWordSize;
909
910 if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
911 if (collector_state()->gcs_are_young() && !collector_state()->last_young_gc()) {
912 ergo_verbose5(ErgoConcCycles,
913 "request concurrent cycle initiation",
914 ergo_format_reason("occupancy higher than threshold")
915 ergo_format_byte("occupancy")
916 ergo_format_byte("allocation request")
917 ergo_format_byte_perc("threshold")
918 ergo_format_str("source"),
919 cur_used_bytes,
920 alloc_byte_size,
921 marking_initiating_used_threshold,
922 (double) InitiatingHeapOccupancyPercent,
923 source);
924 return true;
925 } else {
926 ergo_verbose5(ErgoConcCycles,
927 "do not request concurrent cycle initiation",
928 ergo_format_reason("still doing mixed collections")
929 ergo_format_byte("occupancy")
930 ergo_format_byte("allocation request")
931 ergo_format_byte_perc("threshold")
946
947 void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info) {
948 double end_time_sec = os::elapsedTime();
949 assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
950 "otherwise, the subtraction below does not make sense");
951 size_t rs_size =
952 _cur_collection_pause_used_regions_at_start - cset_region_length();
953 size_t cur_used_bytes = _g1->used();
954 assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
955 bool last_pause_included_initial_mark = false;
956 bool update_stats = !_g1->evacuation_failed();
957
958 #ifndef PRODUCT
959 if (G1YoungSurvRateVerbose) {
960 gclog_or_tty->cr();
961 _short_lived_surv_rate_group->print();
962 // do that for any other surv rate groups too
963 }
964 #endif // PRODUCT
965
966 last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
967 if (last_pause_included_initial_mark) {
968 record_concurrent_mark_init_end(0.0);
969 } else if (need_to_start_conc_mark("end of GC")) {
970 // Note: this might have already been set, if during the last
971 // pause we decided to start a cycle but at the beginning of
972 // this pause we decided to postpone it. That's OK.
973 collector_state()->set_initiate_conc_mark_if_possible(true);
974 }
975
976 _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
977 end_time_sec, false);
978
979 evacuation_info.set_collectionset_used_before(_collection_set_bytes_used_before);
980 evacuation_info.set_bytes_copied(_bytes_copied_during_gc);
981
982 if (update_stats) {
983 _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
984 // this is where we update the allocation rate of the application
985 double app_time_ms =
986 (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
987 if (app_time_ms < MIN_TIMER_GRANULARITY) {
988 // This usually happens due to the timer not having the required
989 // granularity. Some Linuxes are the usual culprits.
990 // We'll just set it to something (arbitrarily) small.
991 app_time_ms = 1.0;
992 }
993 // We maintain the invariant that all objects allocated by mutator
1015 gclog_or_tty->print_cr("Recent GC Times (ms):");
1016 _recent_gc_times_ms->dump();
1017 gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
1018 _recent_prev_end_times_for_all_gcs_sec->dump();
1019 gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
1020 _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
1021 // In debug mode, terminate the JVM if the user wants to debug at this point.
1022 assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
1023 #endif // !PRODUCT
1024 // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
1025 // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
1026 if (_recent_avg_pause_time_ratio < 0.0) {
1027 _recent_avg_pause_time_ratio = 0.0;
1028 } else {
1029 assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
1030 _recent_avg_pause_time_ratio = 1.0;
1031 }
1032 }
1033 }
1034
1035 bool new_in_marking_window = collector_state()->in_marking_window();
1036 bool new_in_marking_window_im = false;
1037 if (last_pause_included_initial_mark) {
1038 new_in_marking_window = true;
1039 new_in_marking_window_im = true;
1040 }
1041
1042 if (collector_state()->last_young_gc()) {
1043 // This is supposed to to be the "last young GC" before we start
1044 // doing mixed GCs. Here we decide whether to start mixed GCs or not.
1045
1046 if (!last_pause_included_initial_mark) {
1047 if (next_gc_should_be_mixed("start mixed GCs",
1048 "do not start mixed GCs")) {
1049 collector_state()->set_gcs_are_young(false);
1050 }
1051 } else {
1052 ergo_verbose0(ErgoMixedGCs,
1053 "do not start mixed GCs",
1054 ergo_format_reason("concurrent cycle is about to start"));
1055 }
1056 collector_state()->set_last_young_gc(false);
1057 }
1058
1059 if (!collector_state()->last_gc_was_young()) {
1060 // This is a mixed GC. Here we decide whether to continue doing
1061 // mixed GCs or not.
1062
1063 if (!next_gc_should_be_mixed("continue mixed GCs",
1064 "do not continue mixed GCs")) {
1065 collector_state()->set_gcs_are_young(true);
1066 }
1067 }
1068
1069 _short_lived_surv_rate_group->start_adding_regions();
1070 // Do that for any other surv rate groups
1071
1072 if (update_stats) {
1073 double cost_per_card_ms = 0.0;
1074 if (_pending_cards > 0) {
1075 cost_per_card_ms = phase_times()->average_last_update_rs_time() / (double) _pending_cards;
1076 _cost_per_card_ms_seq->add(cost_per_card_ms);
1077 }
1078
1079 size_t cards_scanned = _g1->cards_scanned();
1080
1081 double cost_per_entry_ms = 0.0;
1082 if (cards_scanned > 10) {
1083 cost_per_entry_ms = phase_times()->average_last_scan_rs_time() / (double) cards_scanned;
1084 if (collector_state()->last_gc_was_young()) {
1085 _cost_per_entry_ms_seq->add(cost_per_entry_ms);
1086 } else {
1087 _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
1088 }
1089 }
1090
1091 if (_max_rs_lengths > 0) {
1092 double cards_per_entry_ratio =
1093 (double) cards_scanned / (double) _max_rs_lengths;
1094 if (collector_state()->last_gc_was_young()) {
1095 _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
1096 } else {
1097 _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
1098 }
1099 }
1100
1101 // This is defensive. For a while _max_rs_lengths could get
1102 // smaller than _recorded_rs_lengths which was causing
1103 // rs_length_diff to get very large and mess up the RSet length
1104 // predictions. The reason was unsafe concurrent updates to the
1105 // _inc_cset_recorded_rs_lengths field which the code below guards
1106 // against (see CR 7118202). This bug has now been fixed (see CR
1107 // 7119027). However, I'm still worried that
1108 // _inc_cset_recorded_rs_lengths might still end up somewhat
1109 // inaccurate. The concurrent refinement thread calculates an
1110 // RSet's length concurrently with other CR threads updating it
1111 // which might cause it to calculate the length incorrectly (if,
1112 // say, it's in mid-coarsening). So I'll leave in the defensive
1113 // conditional below just in case.
1114 size_t rs_length_diff = 0;
1115 if (_max_rs_lengths > _recorded_rs_lengths) {
1116 rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
1117 }
1118 _rs_length_diff_seq->add((double) rs_length_diff);
1119
1120 size_t freed_bytes = _heap_used_bytes_before_gc - cur_used_bytes;
1121 size_t copied_bytes = _collection_set_bytes_used_before - freed_bytes;
1122 double cost_per_byte_ms = 0.0;
1123
1124 if (copied_bytes > 0) {
1125 cost_per_byte_ms = phase_times()->average_last_obj_copy_time() / (double) copied_bytes;
1126 if (collector_state()->in_marking_window()) {
1127 _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
1128 } else {
1129 _cost_per_byte_ms_seq->add(cost_per_byte_ms);
1130 }
1131 }
1132
1133 double all_other_time_ms = pause_time_ms -
1134 (phase_times()->average_last_update_rs_time() + phase_times()->average_last_scan_rs_time()
1135 + phase_times()->average_last_obj_copy_time() + phase_times()->average_last_termination_time());
1136
1137 double young_other_time_ms = 0.0;
1138 if (young_cset_region_length() > 0) {
1139 young_other_time_ms =
1140 phase_times()->young_cset_choice_time_ms() +
1141 phase_times()->young_free_cset_time_ms();
1142 _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
1143 (double) young_cset_region_length());
1144 }
1145 double non_young_other_time_ms = 0.0;
1146 if (old_cset_region_length() > 0) {
1149 phase_times()->non_young_free_cset_time_ms();
1150
1151 _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
1152 (double) old_cset_region_length());
1153 }
1154
1155 double constant_other_time_ms = all_other_time_ms -
1156 (young_other_time_ms + non_young_other_time_ms);
1157 _constant_other_time_ms_seq->add(constant_other_time_ms);
1158
1159 double survival_ratio = 0.0;
1160 if (_collection_set_bytes_used_before > 0) {
1161 survival_ratio = (double) _bytes_copied_during_gc /
1162 (double) _collection_set_bytes_used_before;
1163 }
1164
1165 _pending_cards_seq->add((double) _pending_cards);
1166 _rs_lengths_seq->add((double) _max_rs_lengths);
1167 }
1168
1169 collector_state()->set_in_marking_window(new_in_marking_window);
1170 collector_state()->set_in_marking_window_im(new_in_marking_window_im);
1171 _free_regions_at_end_of_collection = _g1->num_free_regions();
1172 update_young_list_target_length();
1173
1174 // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
1175 double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
1176 adjust_concurrent_refinement(phase_times()->average_last_update_rs_time(),
1177 phase_times()->sum_last_update_rs_processed_buffers(), update_rs_time_goal_ms);
1178
1179 _collectionSetChooser->verify();
1180 }
1181
// Helpers for printing a byte count scaled to a human-readable unit:
// EXT_SIZE_FORMAT is the printf format, EXT_SIZE_PARAMS supplies the
// matching (scaled value, unit string) argument pair.
1182 #define EXT_SIZE_FORMAT "%.1f%s"
1183 #define EXT_SIZE_PARAMS(bytes) \
1184 byte_size_in_proper_unit((double)(bytes)), \
1185 proper_unit_for_byte_size((bytes))
1186
1187 void G1CollectorPolicy::record_heap_size_info_at_start(bool full) {
1188 YoungList* young_list = _g1->young_list();
1189 _eden_used_bytes_before_gc = young_list->eden_used_bytes();
1190 _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
1278 dcqs.set_completed_queue_padding(curr_queue_size);
1279 } else {
1280 dcqs.set_completed_queue_padding(0);
1281 }
1282 dcqs.notify_if_necessary();
1283 }
1284
1285 double
1286 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
1287 size_t scanned_cards) {
1288 return
1289 predict_rs_update_time_ms(pending_cards) +
1290 predict_rs_scan_time_ms(scanned_cards) +
1291 predict_constant_other_time_ms();
1292 }
1293
1294 double
1295 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
1296 size_t rs_length = predict_rs_length_diff();
1297 size_t card_num;
1298 if (collector_state()->gcs_are_young()) {
1299 card_num = predict_young_card_num(rs_length);
1300 } else {
1301 card_num = predict_non_young_card_num(rs_length);
1302 }
1303 return predict_base_elapsed_time_ms(pending_cards, card_num);
1304 }
1305
1306 size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
1307 size_t bytes_to_copy;
1308 if (hr->is_marked())
1309 bytes_to_copy = hr->max_live_bytes();
1310 else {
1311 assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
1312 int age = hr->age_in_surv_rate_group();
1313 double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
1314 bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
1315 }
1316 return bytes_to_copy;
1317 }
1318
1444 void G1CollectorPolicy::update_survivors_policy() {
1445 double max_survivor_regions_d =
1446 (double) _young_list_target_length / (double) SurvivorRatio;
1447 // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
1448 // smaller than 1.0) we'll get 1.
1449 _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
1450
1451 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
1452 HeapRegion::GrainWords * _max_survivor_regions);
1453 }
1454
1455 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
1456 GCCause::Cause gc_cause) {
1457 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
1458 if (!during_cycle) {
1459 ergo_verbose1(ErgoConcCycles,
1460 "request concurrent cycle initiation",
1461 ergo_format_reason("requested by GC cause")
1462 ergo_format_str("GC cause"),
1463 GCCause::to_string(gc_cause));
1464 collector_state()->set_initiate_conc_mark_if_possible(true);
1465 return true;
1466 } else {
1467 ergo_verbose1(ErgoConcCycles,
1468 "do not request concurrent cycle initiation",
1469 ergo_format_reason("concurrent cycle already in progress")
1470 ergo_format_str("GC cause"),
1471 GCCause::to_string(gc_cause));
1472 return false;
1473 }
1474 }
1475
1476 void
1477 G1CollectorPolicy::decide_on_conc_mark_initiation() {
1478 // We are about to decide on whether this pause will be an
1479 // initial-mark pause.
1480
1481 // First, collector_state()->during_initial_mark_pause() should not be already set. We
1482 // will set it here if we have to. However, it should be cleared by
1483 // the end of the pause (it's only set for the duration of an
1484 // initial-mark pause).
1485 assert(!collector_state()->during_initial_mark_pause(), "pre-condition");
1486
1487 if (collector_state()->initiate_conc_mark_if_possible()) {
1488 // We had noticed on a previous pause that the heap occupancy has
1489 // gone over the initiating threshold and we should start a
1490 // concurrent marking cycle. So we might initiate one.
1491
1492 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
1493 if (!during_cycle) {
1494 // The concurrent marking thread is not "during a cycle", i.e.,
1495 // it has completed the last one. So we can go ahead and
1496 // initiate a new cycle.
1497
1498 collector_state()->set_during_initial_mark_pause(true);
1499 // We do not allow mixed GCs during marking.
1500 if (!collector_state()->gcs_are_young()) {
1501 collector_state()->set_gcs_are_young(true);
1502 ergo_verbose0(ErgoMixedGCs,
1503 "end mixed GCs",
1504 ergo_format_reason("concurrent cycle is about to start"));
1505 }
1506
1507 // And we can now clear initiate_conc_mark_if_possible() as
1508 // we've already acted on it.
1509 collector_state()->set_initiate_conc_mark_if_possible(false);
1510
1511 ergo_verbose0(ErgoConcCycles,
1512 "initiate concurrent cycle",
1513 ergo_format_reason("concurrent cycle initiation requested"));
1514 } else {
1515 // The concurrent marking thread is still finishing up the
1516 // previous cycle. If we start one right now the two cycles
1517 // overlap. In particular, the concurrent marking thread might
1518 // be in the process of clearing the next marking bitmap (which
1519 // we will use for the next cycle if we start one). Starting a
1520 // cycle now will be bad given that parts of the marking
1521 // information might get cleared by the marking thread. And we
1522 // cannot wait for the marking thread to finish the cycle as it
1523 // periodically yields while clearing the next marking bitmap
1524 // and, if it's in a yield point, it's waiting for us to
1525 // finish. So, at this point we will not start a cycle and we'll
1526 // let the concurrent marking thread complete the last one.
1527 ergo_verbose0(ErgoConcCycles,
1528 "do not initiate concurrent cycle",
1529 ergo_format_reason("concurrent cycle already in progress"));
1661 _inc_cset_predicted_elapsed_time_ms +=
1662 _inc_cset_predicted_elapsed_time_ms_diffs;
1663
1664 _inc_cset_recorded_rs_lengths_diffs = 0;
1665 _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
1666 }
1667
1668 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
1669 // This routine is used when:
1670 // * adding survivor regions to the incremental cset at the end of an
1671 // evacuation pause,
1672 // * adding the current allocation region to the incremental cset
1673 // when it is retired, and
1674 // * updating existing policy information for a region in the
1675 // incremental cset via young list RSet sampling.
1676 // Therefore this routine may be called at a safepoint by the
1677 // VM thread, or in-between safepoints by mutator threads (when
1678 // retiring the current allocation region) or a concurrent
1679 // refine thread (RSet sampling).
1680
1681 double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
1682 size_t used_bytes = hr->used();
1683 _inc_cset_recorded_rs_lengths += rs_length;
1684 _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
1685 _inc_cset_bytes_used_before += used_bytes;
1686
1687 // Cache the values we have added to the aggregated information
1688 // in the heap region in case we have to remove this region from
1689 // the incremental collection set, or it is updated by the
1690 // rset sampling code
1691 hr->set_recorded_rs_length(rs_length);
1692 hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
1693 }
1694
1695 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
1696 size_t new_rs_length) {
1697 // Update the CSet information that is dependent on the new RS length
1698 assert(hr->is_young(), "Precondition");
1699 assert(!SafepointSynchronize::is_at_safepoint(),
1700 "should not be at a safepoint");
1701
1702 // We could have updated _inc_cset_recorded_rs_lengths and
1703 // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do
1704 // that atomically, as this code is executed by a concurrent
1705 // refinement thread, potentially concurrently with a mutator thread
1706 // allocating a new region and also updating the same fields. To
1707 // avoid the atomic operations we accumulate these updates on two
1708 // separate fields (*_diffs) and we'll just add them to the "main"
1709 // fields at the start of a GC.
1710
1711 ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
1712 ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
1713 _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;
1714
1715 double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
1716 double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
1717 double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
1718 _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
1719
1720 hr->set_recorded_rs_length(new_rs_length);
1721 hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
1722 }
1723
1724 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
1725 assert(hr->is_young(), "invariant");
1726 assert(hr->young_index_in_cset() > -1, "should have already been set");
1727 assert(_inc_cset_build_state == Active, "Precondition");
1728
1729 // We need to clear and set the cached recorded/cached collection set
1730 // information in the heap region here (before the region gets added
1731 // to the collection set). An individual heap region's cached values
1732 // are calculated, aggregated with the policy collection set info,
1733 // and cached in the heap region here (initially) and (subsequently)
1734 // by the Young List sampling code.
1735
1736 size_t rs_length = hr->rem_set()->occupied();
1891 YoungList* young_list = _g1->young_list();
1892 finalize_incremental_cset_building();
1893
1894 guarantee(target_pause_time_ms > 0.0,
1895 err_msg("target_pause_time_ms = %1.6lf should be positive",
1896 target_pause_time_ms));
1897 guarantee(_collection_set == NULL, "Precondition");
1898
1899 double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
1900 double predicted_pause_time_ms = base_time_ms;
1901 double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);
1902
1903 ergo_verbose4(ErgoCSetConstruction | ErgoHigh,
1904 "start choosing CSet",
1905 ergo_format_size("_pending_cards")
1906 ergo_format_ms("predicted base time")
1907 ergo_format_ms("remaining time")
1908 ergo_format_ms("target pause time"),
1909 _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
1910
1911 collector_state()->set_last_gc_was_young(collector_state()->gcs_are_young() ? true : false);
1912
1913 if (collector_state()->last_gc_was_young()) {
1914 _trace_young_gen_time_data.increment_young_collection_count();
1915 } else {
1916 _trace_young_gen_time_data.increment_mixed_collection_count();
1917 }
1918
1919 // The young list is laid with the survivor regions from the previous
1920 // pause are appended to the RHS of the young list, i.e.
1921 // [Newly Young Regions ++ Survivors from last pause].
1922
1923 uint survivor_region_length = young_list->survivor_length();
1924 uint eden_region_length = young_list->length() - survivor_region_length;
1925 init_cset_region_lengths(eden_region_length, survivor_region_length);
1926
1927 HeapRegion* hr = young_list->first_survivor_region();
1928 while (hr != NULL) {
1929 assert(hr->is_survivor(), "badly formed young list");
1930 // There is a convention that all the young regions in the CSet
1931 // are tagged as "eden", so we do this for the survivors here. We
1932 // use the special set_eden_pre_gc() as it doesn't check that the
1933 // region is free (which is not the case here).
1944 predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;
1945
1946 ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
1947 "add young regions to CSet",
1948 ergo_format_region("eden")
1949 ergo_format_region("survivors")
1950 ergo_format_ms("predicted young region time"),
1951 eden_region_length, survivor_region_length,
1952 _inc_cset_predicted_elapsed_time_ms);
1953
1954 // The number of recorded young regions is the incremental
1955 // collection set's current size
1956 set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
1957
1958 double young_end_time_sec = os::elapsedTime();
1959 phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);
1960
1961 // Set the start of the non-young choice time.
1962 double non_young_start_time_sec = young_end_time_sec;
1963
1964 if (!collector_state()->gcs_are_young()) {
1965 CollectionSetChooser* cset_chooser = _collectionSetChooser;
1966 cset_chooser->verify();
1967 const uint min_old_cset_length = calc_min_old_cset_length();
1968 const uint max_old_cset_length = calc_max_old_cset_length();
1969
1970 uint expensive_region_num = 0;
1971 bool check_time_remaining = adaptive_young_list_length();
1972
1973 HeapRegion* hr = cset_chooser->peek();
1974 while (hr != NULL) {
1975 if (old_cset_region_length() >= max_old_cset_length) {
1976 // Added maximum number of old regions to the CSet.
1977 ergo_verbose2(ErgoCSetConstruction,
1978 "finish adding old regions to CSet",
1979 ergo_format_reason("old CSet region num reached max")
1980 ergo_format_region("old")
1981 ergo_format_region("max"),
1982 old_cset_region_length(), max_old_cset_length);
1983 break;
1984 }
1990 double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
1991 double threshold = (double) G1HeapWastePercent;
1992 if (reclaimable_perc <= threshold) {
1993 // We've added enough old regions that the amount of uncollected
1994 // reclaimable space is at or below the waste threshold. Stop
1995 // adding old regions to the CSet.
1996 ergo_verbose5(ErgoCSetConstruction,
1997 "finish adding old regions to CSet",
1998 ergo_format_reason("reclaimable percentage not over threshold")
1999 ergo_format_region("old")
2000 ergo_format_region("max")
2001 ergo_format_byte_perc("reclaimable")
2002 ergo_format_perc("threshold"),
2003 old_cset_region_length(),
2004 max_old_cset_length,
2005 reclaimable_bytes,
2006 reclaimable_perc, threshold);
2007 break;
2008 }
2009
2010 double predicted_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
2011 if (check_time_remaining) {
2012 if (predicted_time_ms > time_remaining_ms) {
2013 // Too expensive for the current CSet.
2014
2015 if (old_cset_region_length() >= min_old_cset_length) {
2016 // We have added the minimum number of old regions to the CSet,
2017 // we are done with this CSet.
2018 ergo_verbose4(ErgoCSetConstruction,
2019 "finish adding old regions to CSet",
2020 ergo_format_reason("predicted time is too high")
2021 ergo_format_ms("predicted time")
2022 ergo_format_ms("remaining time")
2023 ergo_format_region("old")
2024 ergo_format_region("min"),
2025 predicted_time_ms, time_remaining_ms,
2026 old_cset_region_length(), min_old_cset_length);
2027 break;
2028 }
2029
2030 // We'll add it anyway given that we haven't reached the
|