100 _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
101 _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
102 _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
103 _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
104 _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
105 _non_young_other_cost_per_region_ms_seq(
106 new TruncatedSeq(TruncatedSeqLength)),
107
108 _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
109 _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
110
111 _pause_time_target_ms((double) MaxGCPauseMillis),
112
113 _recent_prev_end_times_for_all_gcs_sec(
114 new TruncatedSeq(NumPrevPausesForHeuristics)),
115
116 _recent_avg_pause_time_ratio(0.0),
117 _rs_lengths_prediction(0),
118 _max_survivor_regions(0),
119
120 _eden_used_bytes_before_gc(0),
121 _survivor_used_bytes_before_gc(0),
122 _old_used_bytes_before_gc(0),
123 _humongous_used_bytes_before_gc(0),
124 _heap_used_bytes_before_gc(0),
125 _metaspace_used_bytes_before_gc(0),
126 _eden_capacity_bytes_before_gc(0),
127 _heap_capacity_bytes_before_gc(0),
128
129 _eden_cset_region_length(0),
130 _survivor_cset_region_length(0),
131 _old_cset_region_length(0),
132
133 _collection_set(NULL),
134 _collection_set_bytes_used_before(0),
135
136 // Incremental CSet attributes
137 _inc_cset_build_state(Inactive),
138 _inc_cset_head(NULL),
139 _inc_cset_tail(NULL),
140 _inc_cset_bytes_used_before(0),
141 _inc_cset_max_finger(NULL),
142 _inc_cset_recorded_rs_lengths(0),
143 _inc_cset_recorded_rs_lengths_diffs(0),
144 _inc_cset_predicted_elapsed_time_ms(0.0),
145 _inc_cset_predicted_elapsed_time_ms_diffs(0.0),
146
147 // Add any further survivor rate groups here.
148 _recorded_survivor_regions(0),
792
793 if (age < 0) {
794 log_info(gc, verify)("## %s: encountered negative age", name);
795 ret = false;
796 }
797
798 if (age <= prev_age) {
799 log_info(gc, verify)("## %s: region ages are not strictly increasing (%d, %d)", name, age, prev_age);
800 ret = false;
801 }
802 prev_age = age;
803 }
804 }
805
806 return ret;
807 }
808 #endif // PRODUCT
809
810 void G1CollectorPolicy::record_full_collection_start() {
811 _full_collection_start_sec = os::elapsedTime();
812 record_heap_size_info_at_start(true /* full */);
813 // Release the future to-space so that it is available for compaction into.
814 collector_state()->set_full_collection(true);
815 }
816
817 void G1CollectorPolicy::record_full_collection_end() {
818 // Consider this like a collection pause for the purposes of allocation
819 // since last pause.
820 double end_sec = os::elapsedTime();
821 double full_gc_time_sec = end_sec - _full_collection_start_sec;
822 double full_gc_time_ms = full_gc_time_sec * 1000.0;
823
824 _trace_old_gen_time_data.record_full_collection(full_gc_time_ms);
825
826 update_recent_gc_times(end_sec, full_gc_time_ms);
827
828 collector_state()->set_full_collection(false);
829
830 // "Nuke" the heuristics that control the young/mixed GC
831 // transitions and make sure we start with young GCs after the Full GC.
832 collector_state()->set_gcs_are_young(true);
854 }
855
856 void G1CollectorPolicy::record_stop_world_start() {
857 _stop_world_start = os::elapsedTime();
858 }
859
860 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
861 // We only need to do this here as the policy will only be applied
862 // to the GC we're about to start, so there is no point in calculating
863 // this every time we calculate / recalculate the target young length.
864 update_survivors_policy();
865
866 assert(_g1->used() == _g1->recalculate_used(),
867 "sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT,
868 _g1->used(), _g1->recalculate_used());
869
870 double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
871 _trace_young_gen_time_data.record_start_collection(s_w_t_ms);
872 _stop_world_start = 0.0;
873
874 record_heap_size_info_at_start(false /* full */);
875
876 phase_times()->record_cur_collection_start_sec(start_time_sec);
877 _pending_cards = _g1->pending_card_num();
878
879 _collection_set_bytes_used_before = 0;
880 _bytes_copied_during_gc = 0;
881
882 collector_state()->set_last_gc_was_young(false);
883
884 // Do this for any other survivor rate groups as well.
885 _short_lived_surv_rate_group->stop_adding_regions();
886 _survivors_age_table.clear();
887
888 assert( verify_young_ages(), "region age verification" );
889 }
890
891 void G1CollectorPolicy::record_concurrent_mark_init_end(double
892 mark_init_elapsed_time_ms) {
893 collector_state()->set_during_marking(true);
894 assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
895 collector_state()->set_during_initial_mark_pause(false);
970 size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold();
971
972 size_t cur_used_bytes = _g1->non_young_capacity_bytes();
973 size_t alloc_byte_size = alloc_word_size * HeapWordSize;
974 size_t marking_request_bytes = cur_used_bytes + alloc_byte_size;
975
976 bool result = false;
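// Example: with a 1 GB heap and the default InitiatingHeapOccupancyPercent of 45,
// the static threshold is roughly 460 MB; the adaptive IHOP control may move it up
// or down based on observed allocation rate and marking length. A concurrent cycle
// is only requested while we are still doing young-only collections.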
977 if (marking_request_bytes > marking_initiating_used_threshold) {
978 result = collector_state()->gcs_are_young() && !collector_state()->last_young_gc();
979 log_debug(gc, ergo, ihop)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s",
980 result ? "Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)",
981 cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1->capacity() * 100, source);
982 }
983
984 return result;
985 }
986
987 // Time values below this threshold are considered to be zero.
988 #define MIN_TIMER_GRANULARITY 0.0000001
989
990 void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned) {
991 double end_time_sec = os::elapsedTime();
992
993 size_t cur_used_bytes = _g1->used();
994 assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
995 bool last_pause_included_initial_mark = false;
996 bool update_stats = !_g1->evacuation_failed();
997
998 NOT_PRODUCT(_short_lived_surv_rate_group->print());
999
1000 record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);
1001
1002 last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
1003 if (last_pause_included_initial_mark) {
1004 record_concurrent_mark_init_end(0.0);
1005 } else {
1006 maybe_start_marking();
1007 }
1008
1009 double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
1010 if (app_time_ms < MIN_TIMER_GRANULARITY) {
1121
1122 // This is defensive. For a while _max_rs_lengths could get
1123 // smaller than _recorded_rs_lengths which was causing
1124 // rs_length_diff to get very large and mess up the RSet length
1125 // predictions. The reason was unsafe concurrent updates to the
1126 // _inc_cset_recorded_rs_lengths field which the code below guards
1127 // against (see CR 7118202). This bug has now been fixed (see CR
1128 // 7119027). However, I'm still worried that
1129 // _inc_cset_recorded_rs_lengths might still end up somewhat
1130 // inaccurate. The concurrent refinement thread calculates an
1131 // RSet's length concurrently with other CR threads updating it
1132 // which might cause it to calculate the length incorrectly (if,
1133 // say, it's in mid-coarsening). So I'll leave in the defensive
1134 // conditional below just in case.
1135 size_t rs_length_diff = 0;
1136 if (_max_rs_lengths > _recorded_rs_lengths) {
1137 rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
1138 }
1139 _rs_length_diff_seq->add((double) rs_length_diff);
1140
1141 size_t freed_bytes = _heap_used_bytes_before_gc - cur_used_bytes;
1142 size_t copied_bytes = _collection_set_bytes_used_before - freed_bytes;
1143 double cost_per_byte_ms = 0.0;
1144
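// Copy cost per byte is tracked in a separate sequence while concurrent marking
// is in progress, since copying is typically more expensive in that window; the
// predictor later picks whichever sequence matches the collector state.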
1145 if (copied_bytes > 0) {
1146 cost_per_byte_ms = average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
1147 if (collector_state()->in_marking_window()) {
1148 _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
1149 } else {
1150 _cost_per_byte_ms_seq->add(cost_per_byte_ms);
1151 }
1152 }
1153
1154 if (young_cset_region_length() > 0) {
1155 _young_other_cost_per_region_ms_seq->add(young_other_time_ms() /
1156 young_cset_region_length());
1157 }
1158
1159 if (old_cset_region_length() > 0) {
1160 _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms() /
1161 old_cset_region_length());
1247 // marking, which makes any prediction useless. This increases the accuracy of the
1248 // prediction.
1249 if (collector_state()->last_gc_was_young() && mutator_time_s > min_valid_time) {
1250 _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size);
1251 report = true;
1252 }
1253
1254 if (report) {
1255 report_ihop_statistics();
1256 }
1257 }
1258
1259 void G1CollectorPolicy::report_ihop_statistics() {
1260 _ihop_control->print();
1261 }
1262
1263 #define EXT_SIZE_FORMAT "%.1f%s"
1264 #define EXT_SIZE_PARAMS(bytes) \
1265 byte_size_in_proper_unit((double)(bytes)), \
1266 proper_unit_for_byte_size((bytes))
1267
1268 void G1CollectorPolicy::record_heap_size_info_at_start(bool full) {
1269 YoungList* young_list = _g1->young_list();
1270 _eden_used_bytes_before_gc = young_list->eden_used_bytes();
1271 _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
1272 _heap_capacity_bytes_before_gc = _g1->capacity();
1273 _old_used_bytes_before_gc = _g1->old_regions_count() * HeapRegion::GrainBytes;
1274 _humongous_used_bytes_before_gc = _g1->humongous_regions_count() * HeapRegion::GrainBytes;
1275 _heap_used_bytes_before_gc = _g1->used();
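// Eden capacity is approximated as the young gen target size minus the space
// already used by survivor regions, since eden and survivors share the young
// generation budget.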
1276 _eden_capacity_bytes_before_gc = (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
1277 _metaspace_used_bytes_before_gc = MetaspaceAux::used_bytes();
1278 }
1279
1280 void G1CollectorPolicy::print_detailed_heap_transition() const {
1281 YoungList* young_list = _g1->young_list();
1282
1283 size_t eden_used_bytes_after_gc = young_list->eden_used_bytes();
1284 size_t survivor_used_bytes_after_gc = young_list->survivor_used_bytes();
1285 size_t heap_used_bytes_after_gc = _g1->used();
1286 size_t old_used_bytes_after_gc = _g1->old_regions_count() * HeapRegion::GrainBytes;
1287 size_t humongous_used_bytes_after_gc = _g1->humongous_regions_count() * HeapRegion::GrainBytes;
1288
1289 size_t heap_capacity_bytes_after_gc = _g1->capacity();
1290 size_t eden_capacity_bytes_after_gc =
1291 (_young_list_target_length * HeapRegion::GrainBytes) - survivor_used_bytes_after_gc;
1292 size_t survivor_capacity_bytes_after_gc = _max_survivor_regions * HeapRegion::GrainBytes;
1293
1294 log_info(gc, heap)("Eden: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
1295 _eden_used_bytes_before_gc / K, eden_used_bytes_after_gc / K, eden_capacity_bytes_after_gc / K);
1296 log_info(gc, heap)("Survivor: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
1297 _survivor_used_bytes_before_gc / K, survivor_used_bytes_after_gc / K, survivor_capacity_bytes_after_gc / K);
1298 log_info(gc, heap)("Old: " SIZE_FORMAT "K->" SIZE_FORMAT "K",
1299 _old_used_bytes_before_gc / K, old_used_bytes_after_gc / K);
1300 log_info(gc, heap)("Humongous: " SIZE_FORMAT "K->" SIZE_FORMAT "K",
1301 _humongous_used_bytes_before_gc / K, humongous_used_bytes_after_gc / K);
1302
1303 MetaspaceAux::print_metaspace_change(_metaspace_used_bytes_before_gc);
1304 }
1305
1306 void G1CollectorPolicy::print_phases() {
1307 phase_times()->print();
1308 }
1309
1310 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
1311 double update_rs_processed_buffers,
1312 double goal_ms) {
1313 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
1314 ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
1315
1316 if (G1UseAdaptiveConcRefinement) {
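// inc_k / dec_k grow or shrink the green zone by 10% per adjustment; k_gy and
// k_gr are the multipliers used to derive the yellow and red zones from the
// green zone.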
1317 const int k_gy = 3, k_gr = 6;
1318 const double inc_k = 1.1, dec_k = 0.9;
1319
1320 int g = cg1r->green_zone();
1321 if (update_rs_time > goal_ms) {
1322 g = (int)(g * dec_k); // Can become 0, that's OK. That would mean mutator-only processing.
1323 } else {
1324 if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
|
100 _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
101 _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
102 _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
103 _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
104 _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
105 _non_young_other_cost_per_region_ms_seq(
106 new TruncatedSeq(TruncatedSeqLength)),
107
108 _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
109 _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
110
111 _pause_time_target_ms((double) MaxGCPauseMillis),
112
113 _recent_prev_end_times_for_all_gcs_sec(
114 new TruncatedSeq(NumPrevPausesForHeuristics)),
115
116 _recent_avg_pause_time_ratio(0.0),
117 _rs_lengths_prediction(0),
118 _max_survivor_regions(0),
119
120 _eden_cset_region_length(0),
121 _survivor_cset_region_length(0),
122 _old_cset_region_length(0),
123
124 _collection_set(NULL),
125 _collection_set_bytes_used_before(0),
126
127 // Incremental CSet attributes
128 _inc_cset_build_state(Inactive),
129 _inc_cset_head(NULL),
130 _inc_cset_tail(NULL),
131 _inc_cset_bytes_used_before(0),
132 _inc_cset_max_finger(NULL),
133 _inc_cset_recorded_rs_lengths(0),
134 _inc_cset_recorded_rs_lengths_diffs(0),
135 _inc_cset_predicted_elapsed_time_ms(0.0),
136 _inc_cset_predicted_elapsed_time_ms_diffs(0.0),
137
138 // Add any further survivor rate groups here.
139 _recorded_survivor_regions(0),
783
784 if (age < 0) {
785 log_info(gc, verify)("## %s: encountered negative age", name);
786 ret = false;
787 }
788
789 if (age <= prev_age) {
790 log_info(gc, verify)("## %s: region ages are not strictly increasing (%d, %d)", name, age, prev_age);
791 ret = false;
792 }
793 prev_age = age;
794 }
795 }
796
797 return ret;
798 }
799 #endif // PRODUCT
800
801 void G1CollectorPolicy::record_full_collection_start() {
802 _full_collection_start_sec = os::elapsedTime();
803 // Release the future to-space so that it is available for compaction into.
804 collector_state()->set_full_collection(true);
805 }
806
807 void G1CollectorPolicy::record_full_collection_end() {
808 // Consider this like a collection pause for the purposes of allocation
809 // since last pause.
810 double end_sec = os::elapsedTime();
811 double full_gc_time_sec = end_sec - _full_collection_start_sec;
812 double full_gc_time_ms = full_gc_time_sec * 1000.0;
813
814 _trace_old_gen_time_data.record_full_collection(full_gc_time_ms);
815
816 update_recent_gc_times(end_sec, full_gc_time_ms);
817
818 collector_state()->set_full_collection(false);
819
820 // "Nuke" the heuristics that control the young/mixed GC
821 // transitions and make sure we start with young GCs after the Full GC.
822 collector_state()->set_gcs_are_young(true);
844 }
845
846 void G1CollectorPolicy::record_stop_world_start() {
847 _stop_world_start = os::elapsedTime();
848 }
849
850 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
851 // We only need to do this here as the policy will only be applied
852 // to the GC we're about to start, so there is no point in calculating
853 // this every time we calculate / recalculate the target young length.
854 update_survivors_policy();
855
856 assert(_g1->used() == _g1->recalculate_used(),
857 "sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT,
858 _g1->used(), _g1->recalculate_used());
859
860 double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
861 _trace_young_gen_time_data.record_start_collection(s_w_t_ms);
862 _stop_world_start = 0.0;
863
864 phase_times()->record_cur_collection_start_sec(start_time_sec);
865 _pending_cards = _g1->pending_card_num();
866
867 _collection_set_bytes_used_before = 0;
868 _bytes_copied_during_gc = 0;
869
870 collector_state()->set_last_gc_was_young(false);
871
872 // Do this for any other survivor rate groups as well.
873 _short_lived_surv_rate_group->stop_adding_regions();
874 _survivors_age_table.clear();
875
876 assert( verify_young_ages(), "region age verification" );
877 }
878
879 void G1CollectorPolicy::record_concurrent_mark_init_end(double
880 mark_init_elapsed_time_ms) {
881 collector_state()->set_during_marking(true);
882 assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
883 collector_state()->set_during_initial_mark_pause(false);
958 size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold();
959
960 size_t cur_used_bytes = _g1->non_young_capacity_bytes();
961 size_t alloc_byte_size = alloc_word_size * HeapWordSize;
962 size_t marking_request_bytes = cur_used_bytes + alloc_byte_size;
963
964 bool result = false;
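// Example: with a 1 GB heap and the default InitiatingHeapOccupancyPercent of 45,
// the static threshold is roughly 460 MB; the adaptive IHOP control may move it up
// or down based on observed allocation rate and marking length. A concurrent cycle
// is only requested while we are still doing young-only collections.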
965 if (marking_request_bytes > marking_initiating_used_threshold) {
966 result = collector_state()->gcs_are_young() && !collector_state()->last_young_gc();
967 log_debug(gc, ergo, ihop)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s",
968 result ? "Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)",
969 cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1->capacity() * 100, source);
970 }
971
972 return result;
973 }
974
975 // Time values below this threshold are considered to be zero.
976 #define MIN_TIMER_GRANULARITY 0.0000001
977
978 void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) {
979 double end_time_sec = os::elapsedTime();
980
981 size_t cur_used_bytes = _g1->used();
982 assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
983 bool last_pause_included_initial_mark = false;
984 bool update_stats = !_g1->evacuation_failed();
985
986 NOT_PRODUCT(_short_lived_surv_rate_group->print());
987
988 record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);
989
990 last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
991 if (last_pause_included_initial_mark) {
992 record_concurrent_mark_init_end(0.0);
993 } else {
994 maybe_start_marking();
995 }
996
997 double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
998 if (app_time_ms < MIN_TIMER_GRANULARITY) {
1109
1110 // This is defensive. For a while _max_rs_lengths could get
1111 // smaller than _recorded_rs_lengths which was causing
1112 // rs_length_diff to get very large and mess up the RSet length
1113 // predictions. The reason was unsafe concurrent updates to the
1114 // _inc_cset_recorded_rs_lengths field which the code below guards
1115 // against (see CR 7118202). This bug has now been fixed (see CR
1116 // 7119027). However, I'm still worried that
1117 // _inc_cset_recorded_rs_lengths might still end up somewhat
1118 // inaccurate. The concurrent refinement thread calculates an
1119 // RSet's length concurrently with other CR threads updating it
1120 // which might cause it to calculate the length incorrectly (if,
1121 // say, it's in mid-coarsening). So I'll leave in the defensive
1122 // conditional below just in case.
1123 size_t rs_length_diff = 0;
1124 if (_max_rs_lengths > _recorded_rs_lengths) {
1125 rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
1126 }
1127 _rs_length_diff_seq->add((double) rs_length_diff);
1128
1129 size_t freed_bytes = heap_used_bytes_before_gc - cur_used_bytes;
1130 size_t copied_bytes = _collection_set_bytes_used_before - freed_bytes;
1131 double cost_per_byte_ms = 0.0;
1132
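// Copy cost per byte is tracked in a separate sequence while concurrent marking
// is in progress, since copying is typically more expensive in that window; the
// predictor later picks whichever sequence matches the collector state.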
1133 if (copied_bytes > 0) {
1134 cost_per_byte_ms = average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
1135 if (collector_state()->in_marking_window()) {
1136 _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
1137 } else {
1138 _cost_per_byte_ms_seq->add(cost_per_byte_ms);
1139 }
1140 }
1141
1142 if (young_cset_region_length() > 0) {
1143 _young_other_cost_per_region_ms_seq->add(young_other_time_ms() /
1144 young_cset_region_length());
1145 }
1146
1147 if (old_cset_region_length() > 0) {
1148 _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms() /
1149 old_cset_region_length());
1235 // marking, which makes any prediction useless. This increases the accuracy of the
1236 // prediction.
1237 if (collector_state()->last_gc_was_young() && mutator_time_s > min_valid_time) {
1238 _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size);
1239 report = true;
1240 }
1241
1242 if (report) {
1243 report_ihop_statistics();
1244 }
1245 }
1246
1247 void G1CollectorPolicy::report_ihop_statistics() {
1248 _ihop_control->print();
1249 }
1250
1251 #define EXT_SIZE_FORMAT "%.1f%s"
1252 #define EXT_SIZE_PARAMS(bytes) \
1253 byte_size_in_proper_unit((double)(bytes)), \
1254 proper_unit_for_byte_size((bytes))
1255
1256 void G1CollectorPolicy::print_phases() {
1257 phase_times()->print();
1258 }
1259
1260 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
1261 double update_rs_processed_buffers,
1262 double goal_ms) {
1263 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
1264 ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
1265
1266 if (G1UseAdaptiveConcRefinement) {
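// inc_k / dec_k grow or shrink the green zone by 10% per adjustment; k_gy and
// k_gr are the multipliers used to derive the yellow and red zones from the
// green zone.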
1267 const int k_gy = 3, k_gr = 6;
1268 const double inc_k = 1.1, dec_k = 0.9;
1269
1270 int g = cg1r->green_zone();
1271 if (update_rs_time > goal_ms) {
1272 g = (int)(g * dec_k); // Can become 0, that's OK. That would mean mutator-only processing.
1273 } else {
1274 if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
|