8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/g1/concurrentG1Refine.hpp"
27 #include "gc/g1/concurrentMarkThread.inline.hpp"
28 #include "gc/g1/g1Analytics.hpp"
29 #include "gc/g1/g1CollectedHeap.inline.hpp"
30 #include "gc/g1/g1CollectionSet.hpp"
31 #include "gc/g1/g1CollectorPolicy.hpp"
32 #include "gc/g1/g1ConcurrentMark.hpp"
33 #include "gc/g1/g1IHOPControl.hpp"
34 #include "gc/g1/g1GCPhaseTimes.hpp"
35 #include "gc/g1/g1YoungGenSizer.hpp"
36 #include "gc/g1/heapRegion.inline.hpp"
37 #include "gc/g1/heapRegionRemSet.hpp"
38 #include "gc/shared/gcPolicyCounters.hpp"
39 #include "runtime/arguments.hpp"
40 #include "runtime/java.hpp"
41 #include "runtime/mutexLocker.hpp"
42 #include "utilities/debug.hpp"
43 #include "utilities/pair.hpp"
44
45 G1CollectorPolicy::G1CollectorPolicy() :
46 _predictor(G1ConfidencePercent / 100.0),
47 _analytics(new G1Analytics(&_predictor)),
48 _pause_time_target_ms((double) MaxGCPauseMillis),
49 _rs_lengths_prediction(0),
50 _max_survivor_regions(0),
51 _survivors_age_table(true),
52 _gc_overhead_perc(0.0),
53
54 _bytes_allocated_in_old_since_last_gc(0),
55 _ihop_control(NULL),
56 _initial_mark_to_mixed() {
57
58 // SurvRateGroups below must be initialized after the predictor because they
59 // indirectly use it through this object passed to their constructor.
60 _short_lived_surv_rate_group =
61 new SurvRateGroup(&_predictor, "Short Lived", G1YoungSurvRateNumRegionsSummary);
62 _survivor_surv_rate_group =
63 new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary);
64
65 // Set up the region size and associated fields. Given that the
66 // policy is created before the heap, we have to set this up here,
67 // so it's done as soon as possible.
223 phase_times()->note_gc_start(num_active_workers);
224 }
225
226 // Create the jstat counters for the policy.
227 void G1CollectorPolicy::initialize_gc_policy_counters() {
228 _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
229 }
230
231 bool G1CollectorPolicy::predict_will_fit(uint young_length,
232 double base_time_ms,
233 uint base_free_regions,
234 double target_pause_time_ms) const {
235 if (young_length >= base_free_regions) {
236 // end condition 1: not enough space for the young regions
237 return false;
238 }
239
240 double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
241 size_t bytes_to_copy =
242 (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
243 double copy_time_ms = _analytics->predict_object_copy_time_ms(bytes_to_copy,
244 collector_state()->during_concurrent_mark());
245 double young_other_time_ms = _analytics->predict_young_other_time_ms(young_length);
246 double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
247 if (pause_time_ms > target_pause_time_ms) {
248 // end condition 2: prediction is over the target pause time
249 return false;
250 }
251
252 size_t free_bytes = (base_free_regions - young_length) * HeapRegion::GrainBytes;
253
254 // When copying, we will likely need more bytes free than is live in the region.
255 // Add some safety margin to factor in the confidence of our guess, and the
256 // natural expected waste.
257 // (100.0 / G1ConfidencePercent) is a scale factor that expresses the uncertainty
258 // of the calculation: the lower the confidence, the more headroom.
259 // (100 + TargetPLABWastePct) represents the increase in expected bytes during
260 // copying due to anticipated waste in the PLABs.
261 double safety_factor = (100.0 / G1ConfidencePercent) * (100 + TargetPLABWastePct) / 100.0;
262 size_t expected_bytes_to_copy = (size_t)(safety_factor * bytes_to_copy);
263
264 if (expected_bytes_to_copy > free_bytes) {
265 // end condition 3: out-of-space
266 return false;
267 }
268
269 // success!
270 return true;
271 }
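// Worked example of the margin above -- a sketch assuming the default flag
// values G1ConfidencePercent = 50 and TargetPLABWastePct = 10:
//   safety_factor = (100.0 / 50) * (100 + 10) / 100.0 = 2.0 * 1.1 = 2.2
// i.e. with the default (low) confidence, predict_will_fit() requires 2.2x
// the predicted live bytes to be free before declaring success.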
272
273 void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) {
274 // re-calculate the necessary reserve
275 double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
276 // We use ceiling so that if reserve_regions_d is > 0.0 (but
277 // smaller than 1.0) we'll get 1.
278 _reserve_regions = (uint) ceil(reserve_regions_d);
279
280 _young_gen_sizer->heap_size_changed(new_number_of_regions);
281
282 _ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes);
283 }
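// For instance (hypothetical region count; _reserve_factor is derived from
// G1ReservePercent, which defaults to 10): with 2048 regions,
//   reserve_regions_d = 2048 * 0.10 = 204.8  ->  _reserve_regions = 205
// so any non-zero reserve factor yields at least one reserve region.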
284
285 uint G1CollectorPolicy::calculate_young_list_desired_min_length(
286 uint base_min_length) const {
287 uint desired_min_length = 0;
288 if (adaptive_young_list_length()) {
289 if (_analytics->num_alloc_rate_ms() > 3) {
290 double now_sec = os::elapsedTime();
291 double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
292 double alloc_rate_ms = _analytics->predict_alloc_rate_ms();
293 desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
294 } else {
295 // otherwise we don't have enough info to make the prediction
296 }
297 }
298 desired_min_length += base_min_length;
299 // make sure we don't go below any user-defined minimum bound
300 return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
301 }
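// Example of the MMU-based bound above (hypothetical inputs): if the
// predicted allocation rate is 0.25 eden regions/ms and the MMU tracker
// allows the next GC in 120 ms, then
//   desired_min_length = ceil(0.25 * 120) = 30 regions, plus base_min_length,
// clamped below by the user-defined minimum young length.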
302
303 uint G1CollectorPolicy::calculate_young_list_desired_max_length() const {
304 // Here, we might want to also take into account any additional
305 // constraints (i.e., user-defined minimum bound). Currently, we
306 // effectively don't set this bound.
307 return _young_gen_sizer->max_desired_young_length();
308 }
309
310 uint G1CollectorPolicy::update_young_list_max_and_target_length() {
311 return update_young_list_max_and_target_length(_analytics->predict_rs_lengths());
312 }
313
314 uint G1CollectorPolicy::update_young_list_max_and_target_length(size_t rs_lengths) {
315 uint unbounded_target_length = update_young_list_target_length(rs_lengths);
316 update_max_gc_locker_expansion();
317 return unbounded_target_length;
318 }
319
320 uint G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
321 YoungTargetLengths young_lengths = young_list_target_lengths(rs_lengths);
322 _young_list_target_length = young_lengths.first;
323 return young_lengths.second;
324 }
325
326 G1CollectorPolicy::YoungTargetLengths G1CollectorPolicy::young_list_target_lengths(size_t rs_lengths) const {
327 YoungTargetLengths result;
328
329 // Calculate the absolute and desired min bounds first.
330
331 // This is how many young regions we already have (currently: the survivors).
396 assert(adaptive_young_list_length(), "pre-condition");
397 assert(collector_state()->gcs_are_young(), "only call this for young GCs");
398
399 // In case some edge-condition makes the desired max length too small...
400 if (desired_max_length <= desired_min_length) {
401 return desired_min_length;
402 }
403
404 // We'll adjust min_young_length and max_young_length not to include
405 // the already allocated young regions (i.e., so they reflect the
406 // min and max eden regions we'll allocate). The base_min_length
407 // will be reflected in the predictions by the
408 // survivor_regions_evac_time prediction.
409 assert(desired_min_length > base_min_length, "invariant");
410 uint min_young_length = desired_min_length - base_min_length;
411 assert(desired_max_length > base_min_length, "invariant");
412 uint max_young_length = desired_max_length - base_min_length;
413
414 double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
415 double survivor_regions_evac_time = predict_survivor_regions_evac_time();
416 size_t pending_cards = _analytics->predict_pending_cards();
417 size_t adj_rs_lengths = rs_lengths + _analytics->predict_rs_length_diff();
418 size_t scanned_cards = _analytics->predict_card_num(adj_rs_lengths, /* gcs_are_young */ true);
419 double base_time_ms =
420 predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
421 survivor_regions_evac_time;
422 uint available_free_regions = _free_regions_at_end_of_collection;
423 uint base_free_regions = 0;
424 if (available_free_regions > _reserve_regions) {
425 base_free_regions = available_free_regions - _reserve_regions;
426 }
427
428 // Here, we will make sure that the shortest young length that
429 // makes sense fits within the target pause time.
430
431 if (predict_will_fit(min_young_length, base_time_ms,
432 base_free_regions, target_pause_time_ms)) {
433 // The shortest young length will fit into the target pause time;
434 // we'll now check whether the absolute maximum number of young
435 // regions will fit in the target pause time. If not, we'll do
436 // a binary search between min_young_length and max_young_length.
437 if (predict_will_fit(max_young_length, base_time_ms,
438 base_free_regions, target_pause_time_ms)) {
498 r != NULL && r != _g1->young_list()->last_survivor_region()->get_next_young_region();
499 r = r->get_next_young_region()) {
500 survivor_regions_evac_time += predict_region_elapsed_time_ms(r, collector_state()->gcs_are_young());
501 }
502 return survivor_regions_evac_time;
503 }
504
505 void G1CollectorPolicy::revise_young_list_target_length_if_necessary(size_t rs_lengths) {
506 guarantee(adaptive_young_list_length(), "should not call this otherwise");
507
508 if (rs_lengths > _rs_lengths_prediction) {
509 // add 10% to avoid having to recalculate often
510 size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
511 update_rs_lengths_prediction(rs_lengths_prediction);
512
513 update_young_list_max_and_target_length(rs_lengths_prediction);
514 }
515 }
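// The 10% padding above is plain integer arithmetic: for observed
// rs_lengths = 2000 the new prediction is 2000 * 1100 / 1000 = 2200, so
// small further growth does not immediately force another recalculation.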
516
517 void G1CollectorPolicy::update_rs_lengths_prediction() {
518 update_rs_lengths_prediction(_analytics->predict_rs_lengths());
519 }
520
521 void G1CollectorPolicy::update_rs_lengths_prediction(size_t prediction) {
522 if (collector_state()->gcs_are_young() && adaptive_young_list_length()) {
523 _rs_lengths_prediction = prediction;
524 }
525 }
526
527 #ifndef PRODUCT
528 bool G1CollectorPolicy::verify_young_ages() {
529 HeapRegion* head = _g1->young_list()->first_region();
530 return
531 verify_young_ages(head, _short_lived_surv_rate_group);
532 // also call verify_young_ages on any additional surv rate groups
533 }
534
535 bool
536 G1CollectorPolicy::verify_young_ages(HeapRegion* head,
537 SurvRateGroup *surv_rate_group) {
538 guarantee(surv_rate_group != NULL, "pre-condition");
566 }
567 }
568
569 return ret;
570 }
571 #endif // PRODUCT
572
573 void G1CollectorPolicy::record_full_collection_start() {
574 _full_collection_start_sec = os::elapsedTime();
575 // Release the future to-space so that it is available for compaction into.
576 collector_state()->set_full_collection(true);
577 }
578
579 void G1CollectorPolicy::record_full_collection_end() {
580 // Consider this like a collection pause for the purposes of allocation
581 // since last pause.
582 double end_sec = os::elapsedTime();
583 double full_gc_time_sec = end_sec - _full_collection_start_sec;
584 double full_gc_time_ms = full_gc_time_sec * 1000.0;
585
586 _analytics->update_recent_gc_times(end_sec, full_gc_time_ms);
587
588 collector_state()->set_full_collection(false);
589
590 // "Nuke" the heuristics that control the young/mixed GC
591 // transitions and make sure we start with young GCs after the Full GC.
592 collector_state()->set_gcs_are_young(true);
593 collector_state()->set_last_young_gc(false);
594 collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
595 collector_state()->set_during_initial_mark_pause(false);
596 collector_state()->set_in_marking_window(false);
597 collector_state()->set_in_marking_window_im(false);
598
599 _short_lived_surv_rate_group->start_adding_regions();
600 // also call this on any additional surv rate groups
601
602 _free_regions_at_end_of_collection = _g1->num_free_regions();
603 // Reset the survivor SurvRateGroup.
604 _survivor_surv_rate_group->reset();
605 update_young_list_max_and_target_length();
606 update_rs_lengths_prediction();
634 _survivors_age_table.clear();
635
636 assert(verify_young_ages(), "region age verification");
637 }
638
639 void G1CollectorPolicy::record_concurrent_mark_init_end(double
640 mark_init_elapsed_time_ms) {
641 collector_state()->set_during_marking(true);
642 assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
643 collector_state()->set_during_initial_mark_pause(false);
644 }
645
646 void G1CollectorPolicy::record_concurrent_mark_remark_start() {
647 _mark_remark_start_sec = os::elapsedTime();
648 collector_state()->set_during_marking(false);
649 }
650
651 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
652 double end_time_sec = os::elapsedTime();
653 double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec) * 1000.0;
654 _analytics->report_concurrent_mark_remark_times_ms(elapsed_time_ms);
655 _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);
656
657 record_pause(Remark, _mark_remark_start_sec, end_time_sec);
658 }
659
660 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
661 _mark_cleanup_start_sec = os::elapsedTime();
662 }
663
664 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
665 bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc",
666 "skip last young-only gc");
667 collector_state()->set_last_young_gc(should_continue_with_reclaim);
668 // We skip the marking phase.
669 if (!should_continue_with_reclaim) {
670 abort_time_to_mixed_tracking();
671 }
672 collector_state()->set_in_marking_window(false);
673 }
674
675 double G1CollectorPolicy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
734
735 void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) {
736 double end_time_sec = os::elapsedTime();
737
738 size_t cur_used_bytes = _g1->used();
739 assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
740 bool last_pause_included_initial_mark = false;
741 bool update_stats = !_g1->evacuation_failed();
742
743 NOT_PRODUCT(_short_lived_surv_rate_group->print());
744
745 record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);
746
747 last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
748 if (last_pause_included_initial_mark) {
749 record_concurrent_mark_init_end(0.0);
750 } else {
751 maybe_start_marking();
752 }
753
754 double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _analytics->prev_collection_pause_end_ms());
755 if (app_time_ms < MIN_TIMER_GRANULARITY) {
756 // This usually happens due to the timer not having the required
757 // granularity. Some Linuxes are the usual culprits.
758 // We'll just set it to something (arbitrarily) small.
759 app_time_ms = 1.0;
760 }
761
762 if (update_stats) {
763 // We maintain the invariant that all objects allocated by mutator
764 // threads will be allocated out of eden regions. So, we can use
765 // the eden region number allocated since the previous GC to
766 // calculate the application's allocation rate. The only exception
767 // to that is humongous objects that are allocated separately. But
768 // given that humongous object allocations do not really affect
769 // either the pause's duration or when the next pause will take
770 // place, we can safely ignore them here.
771 uint regions_allocated = _collection_set->eden_region_length();
772 double alloc_rate_ms = (double) regions_allocated / app_time_ms;
773 _analytics->report_alloc_rate_ms(alloc_rate_ms);
774
775 double interval_ms =
776 (end_time_sec - _analytics->last_known_gc_end_time_sec()) * 1000.0;
777 _analytics->update_recent_gc_times(end_time_sec, pause_time_ms);
778 _analytics->compute_pause_time_ratio(interval_ms, pause_time_ms);
779 }
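// Example of the sample reported above (hypothetical numbers): 12 eden
// regions allocated over app_time_ms = 600.0 gives
//   alloc_rate_ms = 12 / 600.0 = 0.02 regions/ms,
// which later feeds calculate_young_list_desired_min_length().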
780
781 bool new_in_marking_window = collector_state()->in_marking_window();
782 bool new_in_marking_window_im = false;
783 if (last_pause_included_initial_mark) {
784 new_in_marking_window = true;
785 new_in_marking_window_im = true;
786 }
787
788 if (collector_state()->last_young_gc()) {
789 // This is supposed to be the "last young GC" before we start
790 // doing mixed GCs. Here we decide whether to start mixed GCs or not.
791 assert(!last_pause_included_initial_mark, "The last young GC is not allowed to be an initial mark GC");
792
793 if (next_gc_should_be_mixed("start mixed GCs",
794 "do not start mixed GCs")) {
795 collector_state()->set_gcs_are_young(false);
796 } else {
797 // We aborted the mixed GC phase early.
798 abort_time_to_mixed_tracking();
799 }
800
801 collector_state()->set_last_young_gc(false);
802 }
803
804 if (!collector_state()->last_gc_was_young()) {
805 // This is a mixed GC. Here we decide whether to continue doing
806 // mixed GCs or not.
807 if (!next_gc_should_be_mixed("continue mixed GCs",
808 "do not continue mixed GCs")) {
809 collector_state()->set_gcs_are_young(true);
810
811 maybe_start_marking();
812 }
813 }
814
815 _short_lived_surv_rate_group->start_adding_regions();
816 // Do that for any other surv rate groups
817
818 double scan_hcc_time_ms = ConcurrentG1Refine::hot_card_cache_enabled() ? average_time_ms(G1GCPhaseTimes::ScanHCC) : 0.0;
819
820 if (update_stats) {
821 double cost_per_card_ms = 0.0;
822 if (_pending_cards > 0) {
823 cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms) / (double) _pending_cards;
824 _analytics->report_cost_per_card_ms(cost_per_card_ms);
825 }
826 _analytics->report_cost_scan_hcc(scan_hcc_time_ms);
827
828 double cost_per_entry_ms = 0.0;
829 if (cards_scanned > 10) {
830 cost_per_entry_ms = average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned;
831 _analytics->report_cost_per_entry_ms(cost_per_entry_ms, collector_state()->last_gc_was_young());
832 }
833
834 if (_max_rs_lengths > 0) {
835 double cards_per_entry_ratio =
836 (double) cards_scanned / (double) _max_rs_lengths;
837 _analytics->report_cards_per_entry_ratio(cards_per_entry_ratio, collector_state()->last_gc_was_young());
838 }
839
840 // This is defensive. For a while _max_rs_lengths could get
841 // smaller than _recorded_rs_lengths which was causing
842 // rs_length_diff to get very large and mess up the RSet length
843 // predictions. The reason was unsafe concurrent updates to the
844 // _inc_cset_recorded_rs_lengths field which the code below guards
845 // against (see CR 7118202). This bug has now been fixed (see CR
846 // 7119027). However, I'm still worried that
847 // _inc_cset_recorded_rs_lengths might still end up somewhat
848 // inaccurate. The concurrent refinement thread calculates an
849 // RSet's length concurrently with other CR threads updating it
850 // which might cause it to calculate the length incorrectly (if,
851 // say, it's in mid-coarsening). So I'll leave in the defensive
852 // conditional below just in case.
853 size_t rs_length_diff = 0;
854 size_t recorded_rs_lengths = _collection_set->recorded_rs_lengths();
855 if (_max_rs_lengths > recorded_rs_lengths) {
856 rs_length_diff = _max_rs_lengths - recorded_rs_lengths;
857 }
858 _analytics->report_rs_length_diff((double) rs_length_diff);
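// E.g. (hypothetical values): _max_rs_lengths = 1200 with
// recorded_rs_lengths = 1000 yields rs_length_diff = 200; had the
// concurrently sampled recorded_rs_lengths been the larger value, the
// guard above keeps the diff at 0 instead of underflowing the unsigned
// subtraction.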
859
860 size_t freed_bytes = heap_used_bytes_before_gc - cur_used_bytes;
861 size_t copied_bytes = _collection_set->bytes_used_before() - freed_bytes;
862 double cost_per_byte_ms = 0.0;
863
864 if (copied_bytes > 0) {
865 cost_per_byte_ms = average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
866 _analytics->report_cost_per_byte_ms(cost_per_byte_ms, collector_state()->in_marking_window());
867 }
868
869 if (_collection_set->young_region_length() > 0) {
870 _analytics->report_young_other_cost_per_region_ms(young_other_time_ms() /
871 _collection_set->young_region_length());
872 }
873
874 if (_collection_set->old_region_length() > 0) {
875 _analytics->report_non_young_other_cost_per_region_ms(non_young_other_time_ms() /
876 _collection_set->old_region_length());
877 }
878
879 _analytics->report_constant_other_time_ms(constant_other_time_ms(pause_time_ms));
880
881 _analytics->report_pending_cards((double) _pending_cards);
882 _analytics->report_rs_lengths((double) _max_rs_lengths);
883 }
884
885 collector_state()->set_in_marking_window(new_in_marking_window);
886 collector_state()->set_in_marking_window_im(new_in_marking_window_im);
887 _free_regions_at_end_of_collection = _g1->num_free_regions();
888 // IHOP control wants to know the expected young gen length if it were not
889 // restrained by the heap reserve. Using the actual length would make the
890 // prediction too small, and in turn limit the young gen every time we get to the
891 // predicted target occupancy.
892 size_t last_unrestrained_young_length = update_young_list_max_and_target_length();
893 update_rs_lengths_prediction();
894
895 update_ihop_prediction(app_time_ms / 1000.0,
896 _bytes_allocated_in_old_since_last_gc,
897 last_unrestrained_young_length * HeapRegion::GrainBytes);
898 _bytes_allocated_in_old_since_last_gc = 0;
899
900 _ihop_control->send_trace_event(_g1->gc_tracer_stw());
901
902 // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
1020 TruncatedSeq* seq = surv_rate_group->get_seq(age);
1021 guarantee(seq->num() > 0, "There should be some young gen survivor samples available. Tried to access with age %d", age);
1022 double pred = _predictor.get_new_prediction(seq);
1023 if (pred > 1.0) {
1024 pred = 1.0;
1025 }
1026 return pred;
1027 }
1028
1029 double G1CollectorPolicy::predict_yg_surv_rate(int age) const {
1030 return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
1031 }
1032
1033 double G1CollectorPolicy::accum_yg_surv_rate_pred(int age) const {
1034 return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
1035 }
1036
1037 double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
1038 size_t scanned_cards) const {
1039 return
1040 _analytics->predict_rs_update_time_ms(pending_cards) +
1041 _analytics->predict_rs_scan_time_ms(scanned_cards, collector_state()->gcs_are_young()) +
1042 _analytics->predict_constant_other_time_ms();
1043 }
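// A sketch of how the three terms combine (hypothetical predictions):
//   rs update (10000 pending cards)  ~ 4.0 ms
//   rs scan   (25000 scanned cards)  ~ 6.5 ms
//   constant other                   ~ 1.5 ms
//   base elapsed time = 4.0 + 6.5 + 1.5 = 12.0 ms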
1044
1045 double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) const {
1046 size_t rs_length = _analytics->predict_rs_lengths() + _analytics->predict_rs_length_diff();
1047 size_t card_num = _analytics->predict_card_num(rs_length, collector_state()->gcs_are_young());
1048 return predict_base_elapsed_time_ms(pending_cards, card_num);
1049 }
1050
1051 size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) const {
1052 size_t bytes_to_copy;
1053 if (hr->is_marked()) {
1054 bytes_to_copy = hr->max_live_bytes();
1055 } else {
1056 assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
1057 int age = hr->age_in_surv_rate_group();
1058 double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
1059 bytes_to_copy = (size_t) (hr->used() * yg_surv_rate);
1060 }
1061 return bytes_to_copy;
1062 }
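// Example (hypothetical): a young region with age_in_surv_rate_group() == 2,
// a predicted survival rate of 0.35 for that age, and used() == 8 MB is
// predicted to copy 8 MB * 0.35 = 2.8 MB, whereas a marked old region uses
// its max_live_bytes() from marking directly.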
1063
1064 double G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
1065 bool for_young_gc) const {
1066 size_t rs_length = hr->rem_set()->occupied();
1067 // Predicting the number of cards is based on which type of GC
1068 // we're predicting for.
1069 size_t card_num = _analytics->predict_card_num(rs_length, for_young_gc);
1070 size_t bytes_to_copy = predict_bytes_to_copy(hr);
1071
1072 double region_elapsed_time_ms =
1073 _analytics->predict_rs_scan_time_ms(card_num, collector_state()->gcs_are_young()) +
1074 _analytics->predict_object_copy_time_ms(bytes_to_copy, collector_state()->during_concurrent_mark());
1075
1076 // The prediction of the "other" time for this region is based
1077 // upon the region type and NOT the GC type.
1078 if (hr->is_young()) {
1079 region_elapsed_time_ms += _analytics->predict_young_other_time_ms(1);
1080 } else {
1081 region_elapsed_time_ms += _analytics->predict_non_young_other_time_ms(1);
1082 }
1083 return region_elapsed_time_ms;
1084 }
1085
1086 void G1CollectorPolicy::clear_ratio_check_data() {
1087 _ratio_over_threshold_count = 0;
1088 _ratio_over_threshold_sum = 0.0;
1089 _pauses_since_start = 0;
1090 }
1091
1092 size_t G1CollectorPolicy::expansion_amount() {
1093 double recent_gc_overhead = _analytics->recent_avg_pause_time_ratio() * 100.0;
1094 double last_gc_overhead = _analytics->last_pause_time_ratio() * 100.0;
1095 double threshold = _gc_overhead_perc;
1096 size_t expand_bytes = 0;
1097
1098 // If the heap is at less than half its maximum size, scale the threshold down
1099 // proportionally, but never below 1. Thus the smaller the heap is, the more
1100 // likely it is to expand, though the scaling code will likely keep the increase small.
1101 if (_g1->capacity() <= _g1->max_capacity() / 2) {
1102 threshold *= (double)_g1->capacity() / (double)(_g1->max_capacity() / 2);
1103 threshold = MAX2(threshold, 1.0);
1104 }
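// Worked example of the scaling (hypothetical sizes): with threshold = 8.0
// and the heap at a quarter of its maximum capacity,
//   threshold = 8.0 * (0.25 / 0.5) = 4.0,
// which is already above the 1.0 floor applied by MAX2 above.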
1105
1106 // If the last GC time ratio is over the threshold, increment the count of
1107 // times it has been exceeded, and add this ratio to the sum of exceeded
1108 // ratios.
1109 if (last_gc_overhead > threshold) {
1110 _ratio_over_threshold_count++;
1111 _ratio_over_threshold_sum += last_gc_overhead;
1112 }
1113
1114 // Check if we've had enough GC time ratio checks that were over the
1359 const uint min_chunk_size = MAX2(n_regions / n_workers, 1U);
1360 return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size);
1361 }
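// With hypothetical inputs n_workers = 8 and n_regions = 2048 (and the
// overpartition_factor defined in the elided lines above):
//   min_chunk_size = MAX2(2048 / 8, 1U) = 256
//   result         = MAX2(2048 / (8 * overpartition_factor), 256)
// so a worker's chunk never drops below n_regions / n_workers regions.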
1362
1363 void G1CollectorPolicy::record_concurrent_mark_cleanup_end() {
1364 cset_chooser()->clear();
1365
1366 WorkGang* workers = _g1->workers();
1367 uint n_workers = workers->active_workers();
1368
1369 uint n_regions = _g1->num_regions();
1370 uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions);
1371 cset_chooser()->prepare_for_par_region_addition(n_workers, n_regions, chunk_size);
1372 ParKnownGarbageTask par_known_garbage_task(cset_chooser(), chunk_size, n_workers);
1373 workers->run_task(&par_known_garbage_task);
1374
1375 cset_chooser()->sort_regions();
1376
1377 double end_sec = os::elapsedTime();
1378 double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
1379 _analytics->report_concurrent_mark_cleanup_times_ms(elapsed_time_ms);
1380 _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);
1381
1382 record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
1383 }
1384
1385 double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) const {
1386 // Returns the given amount of reclaimable bytes (that represents
1387 // the amount of reclaimable space still to be collected) as a
1388 // percentage of the current heap capacity.
1389 size_t capacity_bytes = _g1->capacity();
1390 return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
1391 }
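// E.g. (hypothetical sizes): 512 MB still reclaimable in an 8 GB heap gives
//   512 * 100.0 / 8192 = 6.25 percent,
// the kind of value next_gc_should_be_mixed() compares against
// G1HeapWastePercent to decide whether further mixed GCs pay off.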
1392
1393 void G1CollectorPolicy::maybe_start_marking() {
1394 if (need_to_start_conc_mark("end of GC")) {
1395 // Note: this might have already been set, if during the last
1396 // pause we decided to start a cycle but at the beginning of
1397 // this pause we decided to postpone it. That's OK.
1398 collector_state()->set_initiate_conc_mark_if_possible(true);
1399 }
1400 }