29 #include "gc/g1/g1CollectionSet.hpp"
30 #include "gc/g1/g1CollectionSetCandidates.hpp"
31 #include "gc/g1/g1ConcurrentMark.hpp"
32 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
33 #include "gc/g1/g1ConcurrentRefine.hpp"
34 #include "gc/g1/g1ConcurrentRefineStats.hpp"
35 #include "gc/g1/g1CollectionSetChooser.hpp"
36 #include "gc/g1/g1HeterogeneousHeapPolicy.hpp"
37 #include "gc/g1/g1HotCardCache.hpp"
38 #include "gc/g1/g1IHOPControl.hpp"
39 #include "gc/g1/g1GCPhaseTimes.hpp"
40 #include "gc/g1/g1Policy.hpp"
41 #include "gc/g1/g1SurvivorRegions.hpp"
42 #include "gc/g1/g1YoungGenSizer.hpp"
43 #include "gc/g1/heapRegion.inline.hpp"
44 #include "gc/g1/heapRegionRemSet.hpp"
45 #include "gc/shared/concurrentGCBreakpoints.hpp"
46 #include "gc/shared/gcPolicyCounters.hpp"
47 #include "logging/log.hpp"
48 #include "runtime/arguments.hpp"
49 #include "runtime/java.hpp"
50 #include "runtime/mutexLocker.hpp"
51 #include "utilities/debug.hpp"
52 #include "utilities/growableArray.hpp"
53 #include "utilities/pair.hpp"
54
55 G1Policy::G1Policy(STWGCTimer* gc_timer) :
56 _predictor(G1ConfidencePercent / 100.0),
57 _analytics(new G1Analytics(&_predictor)),
58 _remset_tracker(),
59 _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
60 _ihop_control(create_ihop_control(&_predictor)),
61 _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
62 _full_collection_start_sec(0.0),
63 _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC),
64 _young_list_target_length(0),
65 _young_list_fixed_length(0),
66 _young_list_max_length(0),
67 _eden_surv_rate_group(new G1SurvRateGroup()),
68 _survivor_surv_rate_group(new G1SurvRateGroup()),
69 _reserve_factor((double) G1ReservePercent / 100.0),
70 _reserve_regions(0),
71 _young_gen_sizer(G1YoungGenSizer::create_gen_sizer()),
72 _free_regions_at_end_of_collection(0),
73 _rs_length(0),
74 _rs_length_prediction(0),
75 _pending_cards_at_gc_start(0),
76 _old_gen_alloc_tracker(),
77 _initial_mark_to_mixed(),
78 _collection_set(NULL),
79 _g1h(NULL),
80 _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
81 _mark_remark_start_sec(0),
82 _mark_cleanup_start_sec(0),
83 _tenuring_threshold(MaxTenuringThreshold),
84 _max_survivor_regions(0),
85 _survivors_age_table(true)
90 delete _ihop_control;
91 delete _young_gen_sizer;
92 }
93
94 G1Policy* G1Policy::create_policy(STWGCTimer* gc_timer_stw) {
95 if (G1Arguments::is_heterogeneous_heap()) {
96 return new G1HeterogeneousHeapPolicy(gc_timer_stw);
97 } else {
98 return new G1Policy(gc_timer_stw);
99 }
100 }
101
102 G1CollectorState* G1Policy::collector_state() const { return _g1h->collector_state(); }
103
104 void G1Policy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {
105 _g1h = g1h;
106 _collection_set = collection_set;
107
108 assert(Heap_lock->owned_by_self(), "Locking discipline.");
109
110 if (!use_adaptive_young_list_length()) {
111 _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
112 }
113 _young_gen_sizer->adjust_max_new_size(_g1h->max_expandable_regions());
114
115 _free_regions_at_end_of_collection = _g1h->num_free_regions();
116
117 update_young_list_max_and_target_length();
118 // We may immediately start allocating regions and placing them on the
119 // collection set list. Initialize the per-collection set info
120 _collection_set->start_incremental_building();
121 }
122
123 void G1Policy::note_gc_start() {
124 phase_times()->note_gc_start();
125 }
126
127 class G1YoungLengthPredictor {
128 const double _base_time_ms;
129 const double _base_free_regions;
130 const double _target_pause_time_ms;
131 const G1Policy* const _policy;
132
133 public:
134 G1YoungLengthPredictor(double base_time_ms,
135 double base_free_regions,
136 double target_pause_time_ms,
137 const G1Policy* policy) :
172 return false;
173 }
174
175 // success!
176 return true;
177 }
178 };
179
180 void G1Policy::record_new_heap_size(uint new_number_of_regions) {
181 // re-calculate the necessary reserve
182 double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
183 // We use ceiling so that if reserve_regions_d is > 0.0 (but
184 // smaller than 1.0) we'll get 1.
185 _reserve_regions = (uint) ceil(reserve_regions_d);
186
187 _young_gen_sizer->heap_size_changed(new_number_of_regions);
188
189 _ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes);
190 }
191
192 uint G1Policy::calculate_young_list_desired_min_length(uint base_min_length) const {
193 uint desired_min_length = 0;
194 if (use_adaptive_young_list_length()) {
195 if (_analytics->num_alloc_rate_ms() > 3) {
196 double now_sec = os::elapsedTime();
197 double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
198 double alloc_rate_ms = _analytics->predict_alloc_rate_ms();
199 desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
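// Illustrative example (numbers are made up, not from the source): if the MMU
// tracker says the next GC may start no earlier than when_ms = 200.0 ms from
// now and the predicted allocation rate is alloc_rate_ms = 0.25 regions/ms,
// then eden needs room for ceil(0.25 * 200.0) = 50 regions until that point.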
200 } else {
201 // otherwise we don't have enough info to make the prediction
202 }
203 }
204 desired_min_length += base_min_length;
205 // make sure we don't go below any user-defined minimum bound
206 return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
207 }
208
209 uint G1Policy::calculate_young_list_desired_max_length() const {
210 // Here, we might want to also take into account any additional
211 // constraints (i.e., user-defined minimum bound). Currently, we
212 // effectively don't set this bound.
213 return _young_gen_sizer->max_desired_young_length();
214 }
215
216 uint G1Policy::update_young_list_max_and_target_length() {
217 return update_young_list_max_and_target_length(_analytics->predict_rs_length());
218 }
219
220 uint G1Policy::update_young_list_max_and_target_length(size_t rs_length) {
221 uint unbounded_target_length = update_young_list_target_length(rs_length);
222 update_max_gc_locker_expansion();
223 return unbounded_target_length;
224 }
225
226 uint G1Policy::update_young_list_target_length(size_t rs_length) {
227 YoungTargetLengths young_lengths = young_list_target_lengths(rs_length);
228 _young_list_target_length = young_lengths.first;
229
230 return young_lengths.second;
231 }
232
233 G1Policy::YoungTargetLengths G1Policy::young_list_target_lengths(size_t rs_length) const {
234 YoungTargetLengths result;
235
236 // Calculate the absolute and desired min bounds first.
237
238 // This is how many young regions we already have (currently: the survivors).
239 const uint base_min_length = _g1h->survivor_regions_count();
240 uint desired_min_length = calculate_young_list_desired_min_length(base_min_length);
241 // This is the absolute minimum young length. Ensure that we
242 // will at least have one eden region available for allocation.
243 uint absolute_min_length = base_min_length + MAX2(_g1h->eden_regions_count(), (uint)1);
244 // If we shrank the young list target it should not shrink below the current size.
245 desired_min_length = MAX2(desired_min_length, absolute_min_length);
246 // Calculate the absolute and desired max bounds.
247
248 uint desired_max_length = calculate_young_list_desired_max_length();
249
250 uint young_list_target_length = 0;
251 if (use_adaptive_young_list_length()) {
252 if (collector_state()->in_young_only_phase()) {
253 young_list_target_length =
254 calculate_young_list_target_length(rs_length,
255 base_min_length,
256 desired_min_length,
257 desired_max_length);
258 } else {
259 // Don't calculate anything and let the code below bound it to
260 // the desired_min_length, i.e., do the next GC as soon as
261 // possible to maximize how many old regions we can add to it.
262 }
263 } else {
264 // The user asked for a fixed young gen so we'll fix the young gen
265 // whether the next GC is young or mixed.
266 young_list_target_length = _young_list_fixed_length;
267 }
268
269 result.second = young_list_target_length;
270
271 // We will try our best not to "eat" into the reserve.
272 uint absolute_max_length = 0;
273 if (_free_regions_at_end_of_collection > _reserve_regions) {
274 absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
275 }
276 if (desired_max_length > absolute_max_length) {
277 desired_max_length = absolute_max_length;
278 }
279
280 // Make sure we don't go over the desired max length, nor under the
281 // desired min length. In case they clash, desired_min_length wins
282 // which is why that test is second.
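// For example (illustration only): if the reserve clamps desired_max_length
// down to 5 while desired_min_length is 8, the first test caps the target at 5
// and the second test then raises it back to 8, so the minimum wins.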
283 if (young_list_target_length > desired_max_length) {
284 young_list_target_length = desired_max_length;
285 }
286 if (young_list_target_length < desired_min_length) {
287 young_list_target_length = desired_min_length;
288 }
289
290 assert(young_list_target_length > base_min_length,
291 "we should be able to allocate at least one eden region");
292 assert(young_list_target_length >= absolute_min_length, "post-condition");
293
294 result.first = young_list_target_length;
295 return result;
296 }
297
298 uint G1Policy::calculate_young_list_target_length(size_t rs_length,
299 uint base_min_length,
300 uint desired_min_length,
301 uint desired_max_length) const {
302 assert(use_adaptive_young_list_length(), "pre-condition");
303 assert(collector_state()->in_young_only_phase(), "only call this for young GCs");
304
305 // In case some edge-condition makes the desired max length too small...
306 if (desired_max_length <= desired_min_length) {
307 return desired_min_length;
308 }
309
310 // We'll adjust min_young_length and max_young_length not to include
311 // the already allocated young regions (i.e., so they reflect the
312 // min and max eden regions we'll allocate). The base_min_length
313 // will be reflected in the predictions by the
314 // survivor_regions_evac_time prediction.
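// For example (illustration only): with 4 survivor regions (base_min_length),
// desired_min_length = 10 and desired_max_length = 30, the search below runs
// over eden lengths 6..26 and the final result adds base_min_length back in.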
315 assert(desired_min_length > base_min_length, "invariant");
316 uint min_young_length = desired_min_length - base_min_length;
317 assert(desired_max_length > base_min_length, "invariant");
318 uint max_young_length = desired_max_length - base_min_length;
319
320 const double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
321 const size_t pending_cards = _analytics->predict_pending_cards();
322 const double base_time_ms = predict_base_elapsed_time_ms(pending_cards, rs_length);
323 const uint available_free_regions = _free_regions_at_end_of_collection;
324 const uint base_free_regions =
325 available_free_regions > _reserve_regions ? available_free_regions - _reserve_regions : 0;
326
327 // Here, we will make sure that the shortest young length that
328 // makes sense fits within the target pause time.
329
330 G1YoungLengthPredictor p(base_time_ms,
331 base_free_regions,
332 target_pause_time_ms,
333 this);
334 if (p.will_fit(min_young_length)) {
335 // The shortest young length will fit into the target pause time;
336 // we'll now check whether the absolute maximum number of young
337 // regions will fit in the target pause time. If not, we'll do
338 // a binary search between min_young_length and max_young_length.
339 if (p.will_fit(max_young_length)) {
340 // The maximum young length will fit into the target pause time.
341 // We are done so set min young length to the maximum length (as
342 // the result is assumed to be returned in min_young_length).
343 min_young_length = max_young_length;
344 } else {
345 // The maximum possible number of young regions will not fit within
346 // the target pause time so we'll search for the optimal
347 // length. The loop invariants are:
348 //
349 // min_young_length < max_young_length
350 // min_young_length is known to fit into the target pause time
351 // max_young_length is known not to fit into the target pause time
352 //
353 // Going into the loop we know the above hold as we've just
354 // checked them. Every time around the loop we check whether
355 // the middle value between min_young_length and
356 // max_young_length fits into the target pause time. If it
357 // does, it becomes the new min. If it doesn't, it becomes
358 // the new max. This way we maintain the loop invariants.
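//
// A hypothetical trace (illustration only): suppose min_young_length = 4 fits,
// max_young_length = 20 does not, and the largest length that still fits is 9.
// diff = 8 -> try 12: does not fit, max = 12
// diff = 4 -> try 8: fits, min = 8
// diff = 2 -> try 10: does not fit, max = 10
// diff = 1 -> try 9: fits, min = 9
// diff = 0 -> loop ends with min_young_length = 9, the largest fitting length.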
359
360 assert(min_young_length < max_young_length, "invariant");
361 uint diff = (max_young_length - min_young_length) / 2;
362 while (diff > 0) {
363 uint young_length = min_young_length + diff;
364 if (p.will_fit(young_length)) {
365 min_young_length = young_length;
366 } else {
367 max_young_length = young_length;
368 }
369 assert(min_young_length < max_young_length, "invariant");
370 diff = (max_young_length - min_young_length) / 2;
371 }
372 // The result is min_young_length which, according to the
373 // loop invariants, should fit within the target pause time.
374
375 // These are the post-conditions of the binary search above:
376 assert(min_young_length < max_young_length,
377 "otherwise we should have discovered that max_young_length "
378 "fits into the pause target and not done the binary search");
379 assert(p.will_fit(min_young_length),
380 "min_young_length, the result of the binary search, should "
381 "fit into the pause target");
382 assert(!p.will_fit(min_young_length + 1),
383 "min_young_length, the result of the binary search, should be "
384 "optimal, so no larger length should fit into the pause target");
385 }
386 } else {
387 // Even the minimum length doesn't fit into the pause time
388 // target, return it as the result nevertheless.
389 }
390 return base_min_length + min_young_length;
391 }
392
393 double G1Policy::predict_survivor_regions_evac_time() const {
394 double survivor_regions_evac_time = 0.0;
395 const GrowableArray<HeapRegion*>* survivor_regions = _g1h->survivor()->regions();
396 for (GrowableArrayIterator<HeapRegion*> it = survivor_regions->begin();
397 it != survivor_regions->end();
398 ++it) {
399 survivor_regions_evac_time += predict_region_total_time_ms(*it, collector_state()->in_young_only_phase());
400 }
401 return survivor_regions_evac_time;
402 }
403
404 void G1Policy::revise_young_list_target_length_if_necessary(size_t rs_length) {
405 guarantee(use_adaptive_young_list_length(), "should not call this otherwise" );
406
407 if (rs_length > _rs_length_prediction) {
408 // add 10% to avoid having to recalculate often
409 size_t rs_length_prediction = rs_length * 1100 / 1000;
410 update_rs_length_prediction(rs_length_prediction);
411
412 update_young_list_max_and_target_length(rs_length_prediction);
413 }
414 }
415
416 void G1Policy::update_rs_length_prediction() {
417 update_rs_length_prediction(_analytics->predict_rs_length());
418 }
419
420 void G1Policy::update_rs_length_prediction(size_t prediction) {
421 if (collector_state()->in_young_only_phase() && use_adaptive_young_list_length()) {
422 _rs_length_prediction = prediction;
423 }
424 }
425
426 void G1Policy::record_full_collection_start() {
427 _full_collection_start_sec = os::elapsedTime();
428 // Release the future to-space so that it is available for compaction into.
429 collector_state()->set_in_young_only_phase(false);
430 collector_state()->set_in_full_gc(true);
431 _collection_set->clear_candidates();
432 _pending_cards_at_gc_start = 0;
440 double full_gc_time_ms = full_gc_time_sec * 1000.0;
441
442 _analytics->update_recent_gc_times(end_sec, full_gc_time_ms);
443
444 collector_state()->set_in_full_gc(false);
445
446 // "Nuke" the heuristics that control the young/mixed GC
447 // transitions and make sure we start with young GCs after the Full GC.
448 collector_state()->set_in_young_only_phase(true);
449 collector_state()->set_in_young_gc_before_mixed(false);
450 collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
451 collector_state()->set_in_initial_mark_gc(false);
452 collector_state()->set_mark_or_rebuild_in_progress(false);
453 collector_state()->set_clearing_next_bitmap(false);
454
455 _eden_surv_rate_group->start_adding_regions();
456 // also call this on any additional surv rate groups
457
458 _free_regions_at_end_of_collection = _g1h->num_free_regions();
459 _survivor_surv_rate_group->reset();
460 update_young_list_max_and_target_length();
461 update_rs_length_prediction();
462
463 _old_gen_alloc_tracker.reset_after_full_gc();
464
465 record_pause(FullGC, _full_collection_start_sec, end_sec);
466 }
467
468 static void log_refinement_stats(const char* kind, const G1ConcurrentRefineStats& stats) {
469 log_debug(gc, refine, stats)
470 ("%s refinement: %.2fms, refined: " SIZE_FORMAT
471 ", precleaned: " SIZE_FORMAT ", dirtied: " SIZE_FORMAT,
472 kind,
473 stats.refinement_time().seconds() * MILLIUNITS,
474 stats.refined_cards(),
475 stats.precleaned_cards(),
476 stats.dirtied_cards());
477 }
478
479 void G1Policy::record_concurrent_refinement_stats() {
480 G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
770 // During mixed gc we do not use them for young gen sizing.
771 if (this_pause_was_young_only) {
772 _analytics->report_pending_cards((double) _pending_cards_at_gc_start);
773 _analytics->report_rs_length((double) _rs_length);
774 }
775 }
776
777 assert(!(this_pause_included_initial_mark && collector_state()->mark_or_rebuild_in_progress()),
778 "If the last pause has been an initial mark, we should not have been in the marking window");
779 if (this_pause_included_initial_mark) {
780 collector_state()->set_mark_or_rebuild_in_progress(true);
781 }
782
783 _free_regions_at_end_of_collection = _g1h->num_free_regions();
784
785 update_rs_length_prediction();
786
787 // Do not update dynamic IHOP due to G1 periodic collection as it is highly likely
788 // that in this case we are not running in a "normal" operating mode.
789 if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
790 // IHOP control wants to know the expected young gen length if it were not
791 // restrained by the heap reserve. Using the actual length would make the
792 // prediction too small and thus limit the young gen every time we get to the
793 // predicted target occupancy.
794 size_t last_unrestrained_young_length = update_young_list_max_and_target_length();
795
796 _old_gen_alloc_tracker.reset_after_young_gc(app_time_ms / 1000.0);
797 update_ihop_prediction(_old_gen_alloc_tracker.last_cycle_duration(),
798 _old_gen_alloc_tracker.last_cycle_old_bytes(),
799 last_unrestrained_young_length * HeapRegion::GrainBytes,
800 this_pause_was_young_only);
801
802 _ihop_control->send_trace_event(_g1h->gc_tracer_stw());
803 } else {
804 // Any garbage collection triggered as periodic collection resets the time-to-mixed
805 // measurement. Periodic collection typically means that the application is "inactive", i.e.
806 // the marking threads may have received an uncharacteristic amount of cpu time
807 // for completing the marking, i.e. are faster than expected.
808 // This skews the predicted marking length towards smaller values which might cause
809 // the mark to start too late.
810 _initial_mark_to_mixed.reset();
811 }
812
813 // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
814 double scan_logged_cards_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
815
816 if (scan_logged_cards_time_goal_ms < merge_hcc_time_ms) {
817 log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal). "
818 "Logged Cards Scan time goal: %1.2fms Scan HCC time: %1.2fms",
819 scan_logged_cards_time_goal_ms, merge_hcc_time_ms);
829 scan_logged_cards_time_goal_ms, logged_cards_time, merge_hcc_time_ms);
830
831 _g1h->concurrent_refine()->adjust(logged_cards_time,
832 phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards),
833 scan_logged_cards_time_goal_ms);
834 }
835
836 G1IHOPControl* G1Policy::create_ihop_control(const G1Predictions* predictor){
837 if (G1UseAdaptiveIHOP) {
838 return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
839 predictor,
840 G1ReservePercent,
841 G1HeapWastePercent);
842 } else {
843 return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent);
844 }
845 }
846
847 void G1Policy::update_ihop_prediction(double mutator_time_s,
848 size_t mutator_alloc_bytes,
849 size_t young_gen_size,
850 bool this_gc_was_young_only) {
851 // Always try to update IHOP prediction. Even evacuation failures give information
852 // about e.g. whether to start IHOP earlier next time.
853
854 // Avoid using really small application times that might create samples with
855 // very high or very low values. They may be caused by e.g. back-to-back gcs.
856 double const min_valid_time = 1e-6;
857
858 bool report = false;
859
860 double marking_to_mixed_time = -1.0;
861 if (!this_gc_was_young_only && _initial_mark_to_mixed.has_result()) {
862 marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time();
863 assert(marking_to_mixed_time > 0.0,
864 "Initial mark to mixed time must be larger than zero but is %.3f",
865 marking_to_mixed_time);
866 if (marking_to_mixed_time > min_valid_time) {
867 _ihop_control->update_marking_length(marking_to_mixed_time);
868 report = true;
869 }
870 }
871
872 // As an approximation for the promotion rates of young gcs during marking we
873 // use the rates of all young gcs: in many applications there are only a few,
874 // if any, young gcs during marking, which would make a prediction based on
875 // those alone useless. Using all of them increases the accuracy of the prediction.
876 if (this_gc_was_young_only && mutator_time_s > min_valid_time) {
877 _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size);
878 report = true;
879 }
880
881 if (report) {
882 report_ihop_statistics();
883 }
884 }
885
886 void G1Policy::report_ihop_statistics() {
887 _ihop_control->print();
888 }
889
890 void G1Policy::print_phases() {
891 phase_times()->print();
892 }
893
894 double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards,
895 size_t rs_length) const {
896 size_t effective_scanned_cards = _analytics->predict_scan_card_num(rs_length, collector_state()->in_young_only_phase());
963
964 bool G1Policy::can_expand_young_list() const {
965 uint young_list_length = _g1h->young_regions_count();
966 uint young_list_max_length = _young_list_max_length;
967 return young_list_length < young_list_max_length;
968 }
969
970 bool G1Policy::use_adaptive_young_list_length() const {
971 return _young_gen_sizer->use_adaptive_young_list_length();
972 }
973
974 size_t G1Policy::desired_survivor_size(uint max_regions) const {
975 size_t const survivor_capacity = HeapRegion::GrainWords * max_regions;
976 return (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);
977 }
978
979 void G1Policy::print_age_table() {
980 _survivors_age_table.print_age_table(_tenuring_threshold);
981 }
982
983 void G1Policy::update_max_gc_locker_expansion() {
984 uint expansion_region_num = 0;
985 if (GCLockerEdenExpansionPercent > 0) {
986 double perc = (double) GCLockerEdenExpansionPercent / 100.0;
987 double expansion_region_num_d = perc * (double) _young_list_target_length;
988 // We use ceiling so that if expansion_region_num_d is > 0.0 (but
989 // less than 1.0) we'll get 1.
990 expansion_region_num = (uint) ceil(expansion_region_num_d);
991 } else {
992 assert(expansion_region_num == 0, "sanity");
993 }
994 _young_list_max_length = _young_list_target_length + expansion_region_num;
995 assert(_young_list_target_length <= _young_list_max_length, "post-condition");
996 }
997
998 // Calculates survivor space parameters.
999 void G1Policy::update_survivors_policy() {
1000 double max_survivor_regions_d =
1001 (double) _young_list_target_length / (double) SurvivorRatio;
1002
1003 // Calculate desired survivor size based on desired max survivor regions (unconstrained
1004 // by remaining heap). Otherwise we may cause undesired promotions as we are
1005 // already getting close to the end of the heap, impacting performance even more.
1006 uint const desired_max_survivor_regions = ceil(max_survivor_regions_d);
1007 size_t const survivor_size = desired_survivor_size(desired_max_survivor_regions);
1008
1009 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(survivor_size);
1010 if (UsePerfData) {
1011 _policy_counters->tenuring_threshold()->set_value(_tenuring_threshold);
1012 _policy_counters->desired_survivor_size()->set_value(survivor_size * oopSize);
1013 }
1014 // The real maximum survivor size is bounded by the number of regions that can
1015 // be allocated into.
1184 if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
1185 _initial_mark_to_mixed.record_initial_mark_end(end);
1186 }
1187 break;
1188 case MixedGC:
1189 _initial_mark_to_mixed.record_mixed_gc_start(start);
1190 break;
1191 default:
1192 ShouldNotReachHere();
1193 }
1194 }
1195
1196 void G1Policy::abort_time_to_mixed_tracking() {
1197 _initial_mark_to_mixed.reset();
1198 }
1199
1200 bool G1Policy::next_gc_should_be_mixed(const char* true_action_str,
1201 const char* false_action_str) const {
1202 G1CollectionSetCandidates* candidates = _collection_set->candidates();
1203
1204 if (candidates->is_empty()) {
1205 log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str);
1206 return false;
1207 }
1208
1209 // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
1210 size_t reclaimable_bytes = candidates->remaining_reclaimable_bytes();
1211 double reclaimable_percent = reclaimable_bytes_percent(reclaimable_bytes);
1212 double threshold = (double) G1HeapWastePercent;
1213 if (reclaimable_percent <= threshold) {
1214 log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
1215 false_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
1216 return false;
1217 }
1218 log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
1219 true_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
1220 return true;
1221 }
1222
1223 uint G1Policy::calc_min_old_cset_length() const {
1224 // The min old CSet region bound is based on the maximum desired
1225 // number of mixed GCs after a cycle. I.e., even if some old regions
1226 // look expensive, we should add them to the CSet anyway to make
1227 // sure we go through the available old regions in no more than the
1228 // maximum desired number of mixed GCs.
1229 //
1230 // The calculation is based on the number of marked regions we added
1231 // to the CSet candidates in the first place, not how many remain, so
1232 // that the result is the same during all mixed GCs that follow a cycle.
1233
1234 const size_t region_num = _collection_set->candidates()->num_regions();
1235 const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
1236 size_t result = region_num / gc_num;
1237 // emulate ceiling
1238 if (result * gc_num < region_num) {
1239 result += 1;
|
29 #include "gc/g1/g1CollectionSet.hpp"
30 #include "gc/g1/g1CollectionSetCandidates.hpp"
31 #include "gc/g1/g1ConcurrentMark.hpp"
32 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
33 #include "gc/g1/g1ConcurrentRefine.hpp"
34 #include "gc/g1/g1ConcurrentRefineStats.hpp"
35 #include "gc/g1/g1CollectionSetChooser.hpp"
36 #include "gc/g1/g1HeterogeneousHeapPolicy.hpp"
37 #include "gc/g1/g1HotCardCache.hpp"
38 #include "gc/g1/g1IHOPControl.hpp"
39 #include "gc/g1/g1GCPhaseTimes.hpp"
40 #include "gc/g1/g1Policy.hpp"
41 #include "gc/g1/g1SurvivorRegions.hpp"
42 #include "gc/g1/g1YoungGenSizer.hpp"
43 #include "gc/g1/heapRegion.inline.hpp"
44 #include "gc/g1/heapRegionRemSet.hpp"
45 #include "gc/shared/concurrentGCBreakpoints.hpp"
46 #include "gc/shared/gcPolicyCounters.hpp"
47 #include "logging/log.hpp"
48 #include "runtime/arguments.hpp"
49 #include "runtime/globals.hpp"
50 #include "runtime/java.hpp"
51 #include "runtime/mutexLocker.hpp"
52 #include "utilities/debug.hpp"
53 #include "utilities/growableArray.hpp"
54 #include "utilities/pair.hpp"
55
56 G1Policy::G1Policy(STWGCTimer* gc_timer) :
57 _predictor(G1ConfidencePercent / 100.0),
58 _analytics(new G1Analytics(&_predictor)),
59 _remset_tracker(),
60 _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
61 _ihop_control(create_ihop_control(&_predictor)),
62 _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
63 _full_collection_start_sec(0.0),
64 _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC),
65 _young_list_desired_length(0),
66 _young_list_target_length(0),
67 _young_list_max_length(0),
68 _eden_surv_rate_group(new G1SurvRateGroup()),
69 _survivor_surv_rate_group(new G1SurvRateGroup()),
70 _reserve_factor((double) G1ReservePercent / 100.0),
71 _reserve_regions(0),
72 _young_gen_sizer(G1YoungGenSizer::create_gen_sizer()),
73 _free_regions_at_end_of_collection(0),
74 _rs_length(0),
75 _rs_length_prediction(0),
76 _pending_cards_at_gc_start(0),
77 _old_gen_alloc_tracker(),
78 _initial_mark_to_mixed(),
79 _collection_set(NULL),
80 _g1h(NULL),
81 _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
82 _mark_remark_start_sec(0),
83 _mark_cleanup_start_sec(0),
84 _tenuring_threshold(MaxTenuringThreshold),
85 _max_survivor_regions(0),
86 _survivors_age_table(true)
91 delete _ihop_control;
92 delete _young_gen_sizer;
93 }
94
95 G1Policy* G1Policy::create_policy(STWGCTimer* gc_timer_stw) {
96 if (G1Arguments::is_heterogeneous_heap()) {
97 return new G1HeterogeneousHeapPolicy(gc_timer_stw);
98 } else {
99 return new G1Policy(gc_timer_stw);
100 }
101 }
102
103 G1CollectorState* G1Policy::collector_state() const { return _g1h->collector_state(); }
104
105 void G1Policy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {
106 _g1h = g1h;
107 _collection_set = collection_set;
108
109 assert(Heap_lock->owned_by_self(), "Locking discipline.");
110
111 _young_gen_sizer->adjust_max_new_size(_g1h->max_expandable_regions());
112
113 _free_regions_at_end_of_collection = _g1h->num_free_regions();
114
115 update_young_length_bounds();
116 // We may immediately start allocating regions and placing them on the
117 // collection set list. Initialize the per-collection set info
118 _collection_set->start_incremental_building();
119 }
120
121 void G1Policy::note_gc_start() {
122 phase_times()->note_gc_start();
123 }
124
125 class G1YoungLengthPredictor {
126 const double _base_time_ms;
127 const double _base_free_regions;
128 const double _target_pause_time_ms;
129 const G1Policy* const _policy;
130
131 public:
132 G1YoungLengthPredictor(double base_time_ms,
133 double base_free_regions,
134 double target_pause_time_ms,
135 const G1Policy* policy) :
170 return false;
171 }
172
173 // success!
174 return true;
175 }
176 };
177
178 void G1Policy::record_new_heap_size(uint new_number_of_regions) {
179 // re-calculate the necessary reserve
180 double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
181 // We use ceiling so that if reserve_regions_d is > 0.0 (but
182 // smaller than 1.0) we'll get 1.
183 _reserve_regions = (uint) ceil(reserve_regions_d);
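// E.g. (illustration only) with G1ReservePercent = 10 and 64 regions the
// reserve is ceil(64 * 0.10) = 7 regions; with only 5 regions it is
// ceil(5 * 0.10) = ceil(0.5) = 1 region rather than 0.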
184
185 _young_gen_sizer->heap_size_changed(new_number_of_regions);
186
187 _ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes);
188 }
189
190 uint G1Policy::calculate_desired_eden_length_by_mmu() const {
191 // One could argue that the smallest useful eden length to keep any MMU would
192 // be 1, but in theory the calculation below may yield 0. Other constraints
193 // enforce a minimum eden of 1 anyway.
194 uint desired_min_length = 0;
195 if (use_adaptive_young_list_length()) {
196 double now_sec = os::elapsedTime();
197 double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
198 double alloc_rate_ms = _analytics->predict_alloc_rate_ms();
199 desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
200 }
201 return desired_min_length;
202 }
203
204 void G1Policy::update_young_length_bounds() {
205 update_young_length_bounds(_analytics->predict_rs_length());
206 }
207
208 void G1Policy::update_young_length_bounds(size_t rs_length) {
209 _young_list_desired_length = calculate_young_desired_length(rs_length);
210 _young_list_target_length = calculate_young_target_length(_young_list_desired_length);
211 _young_list_max_length = calculate_young_max_length(_young_list_target_length);
212
213 log_debug(gc,ergo,heap)("Young list lengths: desired: %u, target: %u, max: %u",
214 _young_list_desired_length,
215 _young_list_target_length,
216 _young_list_max_length);
217 }
218
219 // Calculates desired young gen length. It is calculated from:
220 //
221 // - sizer min/max bounds on young gen
222 // - pause time goal for whole young gen evacuation
223 // - MMU goal influencing eden to make GCs spaced apart.
224 // - a minimum one eden region length.
225 //
226 // We may enter with already allocated eden and survivor regions that exceed
227 // the maximum, or the above goals may result in a desired value smaller than
228 // what has already been allocated.
229 // The main reason is revising the young length, with or without the GCLocker
230 // being active.
231 //
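// A rough sketch of the calculation below (illustration, not authoritative):
//   desired_eden  = max(eden_by_pause or eden_before_mixed, eden_by_mmu, MinDesiredEdenLength)
//   desired_young = clamp(desired_eden + survivor_length, absolute_min, absolute_max)
// where the "by pause" / "before mixed" values come from the pause time goal and
// the "by mmu" value from calculate_desired_eden_length_by_mmu().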
232 uint G1Policy::calculate_young_desired_length(size_t rs_length) const {
233 uint min_young_length_by_sizer = _young_gen_sizer->min_desired_young_length();
234 uint max_young_length_by_sizer = _young_gen_sizer->max_desired_young_length();
235
236 assert(min_young_length_by_sizer >= 1, "invariant");
237 assert(max_young_length_by_sizer >= min_young_length_by_sizer, "invariant");
238
239 // Absolute minimum eden length.
240 // Enforcing a minimum eden length helps at startup when the predictors are not
241 // yet trained on the application to avoid unnecessary (but very short) full gcs
242 // on very small (initial) heaps.
243 uint const MinDesiredEdenLength = 1;
244
245 // Calculate the absolute and desired min bounds first.
246
247 // This is how many survivor regions we already have.
248 const uint survivor_length = _g1h->survivor_regions_count();
249 // Size of the already allocated young gen.
250 const uint allocated_young_length = _g1h->young_regions_count();
251 // This is the absolute minimum young length that we can return. Ensure that we
252 // don't go below any user-defined minimum bound; but we might already have
253 // allocated more than that for various reasons. In that case, use that larger value.
254 uint absolute_min_young_length = MAX2(allocated_young_length, min_young_length_by_sizer);
255 // Calculate the absolute max bound. After an evacuation failure or when revising
256 // the young length, the already allocated young length might exceed the sizer's
257 // maximum, so adjust the bound accordingly.
258 uint absolute_max_young_length = MAX2(max_young_length_by_sizer, absolute_min_young_length);
259
260 uint desired_eden_length_by_mmu = 0;
261 uint desired_eden_length_by_pause = 0;
262 uint desired_eden_length_before_mixed = 0;
263
264 uint desired_young_length = 0;
265 if (use_adaptive_young_list_length()) {
266 desired_eden_length_by_mmu = calculate_desired_eden_length_by_mmu();
267
268 const size_t pending_cards = _analytics->predict_pending_cards();
269 double survivor_base_time_ms = predict_base_elapsed_time_ms(pending_cards, rs_length);
270
271 if (!next_gc_should_be_mixed(NULL, NULL)) {
272 desired_eden_length_by_pause =
273 calculate_desired_eden_length_by_pause(survivor_base_time_ms,
274 absolute_min_young_length - survivor_length,
275 absolute_max_young_length - survivor_length);
276 } else {
277 desired_eden_length_before_mixed =
278 calculate_desired_eden_length_before_mixed(survivor_base_time_ms,
279 absolute_min_young_length - survivor_length,
280 absolute_max_young_length - survivor_length);
281 }
282 // The above sets either desired_eden_length_by_pause or desired_eden_length_before_mixed;
283 // the other remains zero. Below, use whichever one has been set.
284 uint desired_eden_length = MAX2(desired_eden_length_by_pause,
285 desired_eden_length_before_mixed);
286
287 // Finally incorporate MMU concerns; assume that it overrides the pause time
288 // goal, as the default value has been chosen to effectively disable it.
289 // Also request at least one eden region, see above for reasons.
290 desired_eden_length = MAX3(desired_eden_length,
291 desired_eden_length_by_mmu,
292 MinDesiredEdenLength);
293
294 desired_young_length = desired_eden_length + survivor_length;
295 } else {
296 // The user asked for a fixed young gen so we'll fix the young gen
297 // whether the next GC is young or mixed.
298 desired_young_length = min_young_length_by_sizer;
299 }
300 // Clamp to absolute min/max after we determined desired lengths.
301 desired_young_length = clamp(desired_young_length, absolute_min_young_length, absolute_max_young_length);
302
303 log_trace(gc, ergo, heap)("Young desired length %u "
304 "survivor length %u "
305 "allocated young length %u "
306 "absolute min young length %u "
307 "absolute max young length %u "
308 "desired eden length by mmu %u "
309 "desired eden length by pause %u "
310 "desired eden length before mixed %u"
311 "desired eden length by default %u",
312 desired_young_length, survivor_length,
313 allocated_young_length, absolute_min_young_length,
314 absolute_max_young_length, desired_eden_length_by_mmu,
315 desired_eden_length_by_pause,
316 desired_eden_length_before_mixed,
317 MinDesiredEdenLength);
318
319 assert(desired_young_length >= allocated_young_length, "must be");
320 return desired_young_length;
321 }
322
323 // Limit the desired (wished) young length by current free regions. If the request
324 // can be satisfied without using up reserve regions, do so, otherwise eat into
325 // the reserve, giving away at most what the heap sizer allows.
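// Worked example (illustration only, assuming max_to_eat_into_reserve = 5):
// with _reserve_regions = 10, _free_regions_at_end_of_collection = 14,
// desired_young_length = 12 and 3 young regions already allocated, we hit the
// "partially eat into the reserve" case below: free_outside_reserve = 4,
// receiving_within_reserve = min(12 - 4, 5) = 5, receiving_young = 4 + 5 = 9,
// so receiving_additional_eden = 9 - 3 = 6 and the target length becomes 9.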
326 uint G1Policy::calculate_young_target_length(uint desired_young_length) const {
327 uint allocated_young_length = _g1h->young_regions_count();
328
329 uint receiving_additional_eden;
330 if (allocated_young_length >= desired_young_length) {
331 // Already used up all we actually want (may happen as G1 revises the
332 // young list length concurrently, or caused by gclocker). Do not allow more,
333 // potentially resulting in GC.
334 receiving_additional_eden = 0;
335 log_trace(gc, ergo, heap)("Young target length: Already used up desired young %u allocated %u",
336 desired_young_length,
337 allocated_young_length);
338 } else {
339 // Now look at how many free regions there are currently, and the heap reserve.
340 // We will try our best not to "eat" into the reserve as long as we can. If we
341 // do, we eat at most the smaller of the sizer's minimum young length and half
342 // the reserve, rounded up (an arbitrary choice).
343
344 uint max_to_eat_into_reserve = MIN2(_young_gen_sizer->min_desired_young_length(),
345 (_reserve_regions + 1) / 2);
346
347 log_trace(gc, ergo, heap)("Young target length: Common "
348 "free regions at end of collection %u "
349 "desired young length %u "
350 "reserve region %u "
351 "max to eat into reserve %u",
352 _free_regions_at_end_of_collection,
353 desired_young_length,
354 _reserve_regions,
355 max_to_eat_into_reserve);
356
357 if (_free_regions_at_end_of_collection <= _reserve_regions) {
358 // Fully eat (or already eating) into the reserve, hand back at most max_to_eat_into_reserve regions.
359 uint receiving_young = MIN3(_free_regions_at_end_of_collection,
360 desired_young_length,
361 max_to_eat_into_reserve);
362 // We could already have allocated more regions than what we could get
363 // above.
364 receiving_additional_eden = allocated_young_length < receiving_young ?
365 receiving_young - allocated_young_length : 0;
366
367 log_trace(gc, ergo, heap)("Young target length: Fully eat into reserve "
368 "receiving young %u receiving additional eden %u",
369 receiving_young,
370 receiving_additional_eden);
371 } else if (_free_regions_at_end_of_collection < (desired_young_length + _reserve_regions)) {
372 // Partially eat into the reserve, at most max_to_eat_into_reserve regions.
373 uint free_outside_reserve = _free_regions_at_end_of_collection - _reserve_regions;
374 assert(free_outside_reserve < desired_young_length,
375 "must be %u %u",
376 free_outside_reserve, desired_young_length);
377
378 uint receiving_within_reserve = MIN2(desired_young_length - free_outside_reserve,
379 max_to_eat_into_reserve);
380 uint receiving_young = free_outside_reserve + receiving_within_reserve;
381 // Again, we could have already allocated more than we could get.
382 receiving_additional_eden = allocated_young_length < receiving_young ?
383 receiving_young - allocated_young_length : 0;
384
385 log_trace(gc, ergo, heap)("Young target length: Partially eat into reserve "
386 "free outside reserve %u "
387 "receiving within reserve %u "
388 "receiving young %u "
389 "receiving additional eden %u",
390 free_outside_reserve, receiving_within_reserve,
391 receiving_young, receiving_additional_eden);
392 } else {
393 // No need to use the reserve.
394 receiving_additional_eden = desired_young_length - allocated_young_length;
395 log_trace(gc, ergo, heap)("Young target length: No need to use reserve "
396 "receiving additional eden %u",
397 receiving_additional_eden);
398 }
399 }
400
401 uint target_young_length = allocated_young_length + receiving_additional_eden;
402
403 assert(target_young_length >= allocated_young_length, "must be");
404
405 log_trace(gc, ergo, heap)("Young target length: "
406 "young target length %u "
407 "allocated young length %u "
408 "received additional eden %u",
409 target_young_length, allocated_young_length,
410 receiving_additional_eden);
411 return target_young_length;
412 }
413
414 uint G1Policy::calculate_desired_eden_length_by_pause(double base_time_ms,
415 uint min_eden_length,
416 uint max_eden_length) const {
417 assert(use_adaptive_young_list_length(), "pre-condition");
418
419 assert(min_eden_length <= max_eden_length, "must be %u %u", min_eden_length, max_eden_length);
420
421 // Here, we will make sure that the shortest young length that
422 // makes sense fits within the target pause time.
423
424 G1YoungLengthPredictor p(base_time_ms,
425 _free_regions_at_end_of_collection,
426 _mmu_tracker->max_gc_time() * 1000.0,
427 this);
428 if (p.will_fit(min_eden_length)) {
429 // The shortest young length will fit into the target pause time;
430 // we'll now check whether the absolute maximum number of young
431 // regions will fit in the target pause time. If not, we'll do
432 // a binary search between min_eden_length and max_eden_length.
433 if (p.will_fit(max_eden_length)) {
434 // The maximum young length will fit into the target pause time.
435 // We are done so set min eden length to the maximum length (as
436 // the result is assumed to be returned in min_eden_length).
437 min_eden_length = max_eden_length;
438 } else {
439 // The maximum possible number of young regions will not fit within
440 // the target pause time so we'll search for the optimal
441 // length. The loop invariants are:
442 //
443 // min_eden_length < max_eden_length
444 // min_eden_length is known to fit into the target pause time
445 // max_eden_length is known not to fit into the target pause time
446 //
447 // Going into the loop we know the above hold as we've just
448 // checked them. Every time around the loop we check whether
449 // the middle value between min_eden_length and
450 // max_eden_length fits into the target pause time. If it
451 // does, it becomes the new min. If it doesn't, it becomes
452 // the new max. This way we maintain the loop invariants.
453
454 assert(min_eden_length < max_eden_length, "invariant");
455 uint diff = (max_eden_length - min_eden_length) / 2;
456 while (diff > 0) {
457 uint eden_length = min_eden_length + diff;
458 if (p.will_fit(eden_length)) {
459 min_eden_length = eden_length;
460 } else {
461 max_eden_length = eden_length;
462 }
463 assert(min_eden_length < max_eden_length, "invariant");
464 diff = (max_eden_length - min_eden_length) / 2;
465 }
466 // The result is min_eden_length which, according to the
467 // loop invariants, should fit within the target pause time.
468
469 // These are the post-conditions of the binary search above:
470 assert(min_eden_length < max_eden_length,
471 "otherwise we should have discovered that max_eden_length "
472 "fits into the pause target and not done the binary search");
473 assert(p.will_fit(min_eden_length),
474 "min_eden_length, the result of the binary search, should "
475 "fit into the pause target");
476 assert(!p.will_fit(min_eden_length + 1),
477 "min_eden_length, the result of the binary search, should be "
478 "optimal, so no larger length should fit into the pause target");
479 }
480 } else {
481 // Even the minimum length doesn't fit into the pause time
482 // target, return it as the result nevertheless.
483 }
484 return min_eden_length;
485 }
486
487 uint G1Policy::calculate_desired_eden_length_before_mixed(double survivor_base_time_ms,
488 uint min_eden_length,
489 uint max_eden_length) const {
490 G1CollectionSetCandidates* candidates = _collection_set->candidates();
491
492 uint min_old_regions_end = MIN2(candidates->cur_idx() + calc_min_old_cset_length(), candidates->num_regions());
493 double predicted_region_evac_time_ms = survivor_base_time_ms;
494 for (uint i = candidates->cur_idx(); i < min_old_regions_end; i++) {
495 HeapRegion* r = candidates->at(i);
496 predicted_region_evac_time_ms += predict_region_total_time_ms(r, false);
497 }
498 uint desired_eden_length_by_min_cset_length =
499 calculate_desired_eden_length_by_pause(predicted_region_evac_time_ms,
500 min_eden_length,
501 max_eden_length);
502
503 return desired_eden_length_by_min_cset_length;
504 }
505
506 double G1Policy::predict_survivor_regions_evac_time() const {
507 double survivor_regions_evac_time = 0.0;
508 const GrowableArray<HeapRegion*>* survivor_regions = _g1h->survivor()->regions();
509 for (GrowableArrayIterator<HeapRegion*> it = survivor_regions->begin();
510 it != survivor_regions->end();
511 ++it) {
512 survivor_regions_evac_time += predict_region_total_time_ms(*it, collector_state()->in_young_only_phase());
513 }
514 return survivor_regions_evac_time;
515 }
516
517 void G1Policy::revise_young_list_target_length_if_necessary(size_t rs_length) {
518 guarantee(use_adaptive_young_list_length(), "should not call this otherwise" );
519
520 if (rs_length > _rs_length_prediction) {
521 // add 10% to avoid having to recalculate often
522 size_t rs_length_prediction = rs_length * 1100 / 1000;
523 update_rs_length_prediction(rs_length_prediction);
524 update_young_length_bounds(rs_length_prediction);
525 }
526 }
527
528 void G1Policy::update_rs_length_prediction() {
529 update_rs_length_prediction(_analytics->predict_rs_length());
530 }
531
532 void G1Policy::update_rs_length_prediction(size_t prediction) {
533 if (collector_state()->in_young_only_phase() && use_adaptive_young_list_length()) {
534 _rs_length_prediction = prediction;
535 }
536 }
537
538 void G1Policy::record_full_collection_start() {
539 _full_collection_start_sec = os::elapsedTime();
540 // Release the future to-space so that it is available for compaction into.
541 collector_state()->set_in_young_only_phase(false);
542 collector_state()->set_in_full_gc(true);
543 _collection_set->clear_candidates();
544 _pending_cards_at_gc_start = 0;
552 double full_gc_time_ms = full_gc_time_sec * 1000.0;
553
554 _analytics->update_recent_gc_times(end_sec, full_gc_time_ms);
555
556 collector_state()->set_in_full_gc(false);
557
558 // "Nuke" the heuristics that control the young/mixed GC
559 // transitions and make sure we start with young GCs after the Full GC.
560 collector_state()->set_in_young_only_phase(true);
561 collector_state()->set_in_young_gc_before_mixed(false);
562 collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
563 collector_state()->set_in_initial_mark_gc(false);
564 collector_state()->set_mark_or_rebuild_in_progress(false);
565 collector_state()->set_clearing_next_bitmap(false);
566
567 _eden_surv_rate_group->start_adding_regions();
568 // also call this on any additional surv rate groups
569
570 _free_regions_at_end_of_collection = _g1h->num_free_regions();
571 _survivor_surv_rate_group->reset();
572 update_young_length_bounds();
573 update_rs_length_prediction();
574
575 _old_gen_alloc_tracker.reset_after_full_gc();
576
577 record_pause(FullGC, _full_collection_start_sec, end_sec);
578 }
579
580 static void log_refinement_stats(const char* kind, const G1ConcurrentRefineStats& stats) {
581 log_debug(gc, refine, stats)
582 ("%s refinement: %.2fms, refined: " SIZE_FORMAT
583 ", precleaned: " SIZE_FORMAT ", dirtied: " SIZE_FORMAT,
584 kind,
585 stats.refinement_time().seconds() * MILLIUNITS,
586 stats.refined_cards(),
587 stats.precleaned_cards(),
588 stats.dirtied_cards());
589 }
590
591 void G1Policy::record_concurrent_refinement_stats() {
592 G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
882 // During mixed gc we do not use them for young gen sizing.
883 if (this_pause_was_young_only) {
884 _analytics->report_pending_cards((double) _pending_cards_at_gc_start);
885 _analytics->report_rs_length((double) _rs_length);
886 }
887 }
888
889 assert(!(this_pause_included_initial_mark && collector_state()->mark_or_rebuild_in_progress()),
890 "If the last pause has been an initial mark, we should not have been in the marking window");
891 if (this_pause_included_initial_mark) {
892 collector_state()->set_mark_or_rebuild_in_progress(true);
893 }
894
895 _free_regions_at_end_of_collection = _g1h->num_free_regions();
896
897 update_rs_length_prediction();
898
899 // Do not update dynamic IHOP due to G1 periodic collection as it is highly likely
900 // that in this case we are not running in a "normal" operating mode.
901 if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
902 update_young_length_bounds();
903
904 _old_gen_alloc_tracker.reset_after_young_gc(app_time_ms / 1000.0);
905 update_ihop_prediction(_old_gen_alloc_tracker.last_cycle_duration(),
906 _old_gen_alloc_tracker.last_cycle_old_bytes(),
907 this_pause_was_young_only);
908
909 _ihop_control->send_trace_event(_g1h->gc_tracer_stw());
910 } else {
911 // Any garbage collection triggered as periodic collection resets the time-to-mixed
912 // measurement. Periodic collection typically means that the application is "inactive", i.e.
913 // the marking threads may have received an uncharacteristic amount of cpu time
914 // for completing the marking, i.e. are faster than expected.
915 // This skews the predicted marking length towards smaller values which might cause
916 // the mark to start too late.
917 _initial_mark_to_mixed.reset();
918 }
919
920 // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
921 double scan_logged_cards_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
922
923 if (scan_logged_cards_time_goal_ms < merge_hcc_time_ms) {
924 log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal). "
925 "Logged Cards Scan time goal: %1.2fms Scan HCC time: %1.2fms",
926 scan_logged_cards_time_goal_ms, merge_hcc_time_ms);
936 scan_logged_cards_time_goal_ms, logged_cards_time, merge_hcc_time_ms);
937
938 _g1h->concurrent_refine()->adjust(logged_cards_time,
939 phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards),
940 scan_logged_cards_time_goal_ms);
941 }
942
943 G1IHOPControl* G1Policy::create_ihop_control(const G1Predictions* predictor){
944 if (G1UseAdaptiveIHOP) {
945 return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
946 predictor,
947 G1ReservePercent,
948 G1HeapWastePercent);
949 } else {
950 return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent);
951 }
952 }
953
954 void G1Policy::update_ihop_prediction(double mutator_time_s,
955 size_t mutator_alloc_bytes,
956 bool this_gc_was_young_only) {
957 // Always try to update IHOP prediction. Even evacuation failures give information
958 // about e.g. whether to start IHOP earlier next time.
959
960 // Avoid using really small application times that might create samples with
961 // very high or very low values. They may be caused by e.g. back-to-back gcs.
962 double const min_valid_time = 1e-6;
963
964 bool report = false;
965
966 double marking_to_mixed_time = -1.0;
967 if (!this_gc_was_young_only && _initial_mark_to_mixed.has_result()) {
968 marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time();
969 assert(marking_to_mixed_time > 0.0,
970 "Initial mark to mixed time must be larger than zero but is %.3f",
971 marking_to_mixed_time);
972 if (marking_to_mixed_time > min_valid_time) {
973 _ihop_control->update_marking_length(marking_to_mixed_time);
974 report = true;
975 }
976 }
977
978 // As an approximation for the promotion rates of young gcs during marking we
979 // use the rates of all young gcs: in many applications there are only a few,
980 // if any, young gcs during marking, which would make a prediction based on
981 // those alone useless. Using all of them increases the accuracy of the prediction.
982 if (this_gc_was_young_only && mutator_time_s > min_valid_time) {
983 // IHOP control wants to know the expected young gen length if it were not
984 // restrained by the heap reserve. Using the actual length would make the
985 // prediction too small and thus limit the young gen every time we get to the
986 // predicted target occupancy.
987 size_t young_gen_size = young_list_desired_length() * HeapRegion::GrainBytes;
988 _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size);
989 report = true;
990 }
991
992 if (report) {
993 report_ihop_statistics();
994 }
995 }
996
997 void G1Policy::report_ihop_statistics() {
998 _ihop_control->print();
999 }
1000
1001 void G1Policy::print_phases() {
1002 phase_times()->print();
1003 }
1004
1005 double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards,
1006 size_t rs_length) const {
1007 size_t effective_scanned_cards = _analytics->predict_scan_card_num(rs_length, collector_state()->in_young_only_phase());
1074
1075 bool G1Policy::can_expand_young_list() const {
1076 uint young_list_length = _g1h->young_regions_count();
1077 uint young_list_max_length = _young_list_max_length;
1078 return young_list_length < young_list_max_length;
1079 }
1080
1081 bool G1Policy::use_adaptive_young_list_length() const {
1082 return _young_gen_sizer->use_adaptive_young_list_length();
1083 }
1084
1085 size_t G1Policy::desired_survivor_size(uint max_regions) const {
1086 size_t const survivor_capacity = HeapRegion::GrainWords * max_regions;
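// E.g. (illustration only) with TargetSurvivorRatio = 50 the desired survivor
// size is half of the total word capacity of max_regions regions.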
1087 return (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);
1088 }
1089
1090 void G1Policy::print_age_table() {
1091 _survivors_age_table.print_age_table(_tenuring_threshold);
1092 }
1093
1094 uint G1Policy::calculate_young_max_length(uint target_young_length) const {
1095 uint expansion_region_num = 0;
1096 if (GCLockerEdenExpansionPercent > 0) {
1097 double perc = (double) GCLockerEdenExpansionPercent / 100.0;
1098 double expansion_region_num_d = perc * (double) _young_list_target_length;
1099 // We use ceiling so that if expansion_region_num_d is > 0.0 (but
1100 // less than 1.0) we'll get 1.
1101 expansion_region_num = (uint) ceil(expansion_region_num_d);
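// E.g. (illustration only) with GCLockerEdenExpansionPercent = 5 and a target
// length of 30 regions this allows ceil(1.5) = 2 extra regions, i.e. a max
// young length of 32.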
1102 } else {
1103 assert(expansion_region_num == 0, "sanity");
1104 }
1105 uint max_length = target_young_length + expansion_region_num;
1106 assert(target_young_length <= max_length, "post-condition");
1107 return max_length;
1108 }
1109
1110 // Calculates survivor space parameters.
1111 void G1Policy::update_survivors_policy() {
1112 double max_survivor_regions_d =
1113 (double) _young_list_target_length / (double) SurvivorRatio;
1114
1115 // Calculate desired survivor size based on desired max survivor regions (unconstrained
1116 // by remaining heap). Otherwise we may cause undesired promotions as we are
1117 // already getting close to the end of the heap, impacting performance even more.
1118 uint const desired_max_survivor_regions = ceil(max_survivor_regions_d);
1119 size_t const survivor_size = desired_survivor_size(desired_max_survivor_regions);
1120
1121 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(survivor_size);
1122 if (UsePerfData) {
1123 _policy_counters->tenuring_threshold()->set_value(_tenuring_threshold);
1124 _policy_counters->desired_survivor_size()->set_value(survivor_size * oopSize);
1125 }
1126 // The real maximum survivor size is bounded by the number of regions that can
1127 // be allocated into.
1296 if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
1297 _initial_mark_to_mixed.record_initial_mark_end(end);
1298 }
1299 break;
1300 case MixedGC:
1301 _initial_mark_to_mixed.record_mixed_gc_start(start);
1302 break;
1303 default:
1304 ShouldNotReachHere();
1305 }
1306 }
1307
1308 void G1Policy::abort_time_to_mixed_tracking() {
1309 _initial_mark_to_mixed.reset();
1310 }
1311
1312 bool G1Policy::next_gc_should_be_mixed(const char* true_action_str,
1313 const char* false_action_str) const {
1314 G1CollectionSetCandidates* candidates = _collection_set->candidates();
1315
1316 if (candidates == NULL || candidates->is_empty()) {
1317 if (false_action_str != NULL) {
1318 log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str);
1319 }
1320 return false;
1321 }
1322
1323 // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
1324 size_t reclaimable_bytes = candidates->remaining_reclaimable_bytes();
1325 double reclaimable_percent = reclaimable_bytes_percent(reclaimable_bytes);
1326 double threshold = (double) G1HeapWastePercent;
1327 if (reclaimable_percent <= threshold) {
1328 if (false_action_str != NULL) {
1329 log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
1330 false_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
1331 }
1332 return false;
1333 }
1334 if (true_action_str != NULL) {
1335 log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
1336 true_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
1337 }
1338 return true;
1339 }
1340
1341 uint G1Policy::calc_min_old_cset_length() const {
1342 // The min old CSet region bound is based on the maximum desired
1343 // number of mixed GCs after a cycle. I.e., even if some old regions
1344 // look expensive, we should add them to the CSet anyway to make
1345 // sure we go through the available old regions in no more than the
1346 // maximum desired number of mixed GCs.
1347 //
1348 // The calculation is based on the number of marked regions we added
1349 // to the CSet candidates in the first place, not how many remain, so
1350 // that the result is the same during all mixed GCs that follow a cycle.
1351
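// For example (illustration only): with 25 candidate regions and
// G1MixedGCCountTarget = 8, result = 25 / 8 = 3, and since 3 * 8 = 24 < 25 the
// ceiling correction below bumps it to 4, so taking at least 4 old regions per
// mixed gc goes through all 25 candidates within the 8-GC target.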
1352 const size_t region_num = _collection_set->candidates()->num_regions();
1353 const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
1354 size_t result = region_num / gc_num;
1355 // emulate ceiling
1356 if (result * gc_num < region_num) {
1357 result += 1;
|