312 assert(collector_state()->in_young_only_phase(), "only call this for young GCs");
313
314 // In case some edge-condition makes the desired max length too small...
315 if (desired_max_length <= desired_min_length) {
316 return desired_min_length;
317 }
318
319 // We'll adjust min_young_length and max_young_length not to include
320 // the already allocated young regions (i.e., so they reflect the
321 // min and max eden regions we'll allocate). The base_min_length
322 // will be reflected in the predictions by the
323 // survivor_regions_evac_time prediction.
324 assert(desired_min_length > base_min_length, "invariant");
325 uint min_young_length = desired_min_length - base_min_length;
326 assert(desired_max_length > base_min_length, "invariant");
327 uint max_young_length = desired_max_length - base_min_length;
328
329 const double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
330 const double survivor_regions_evac_time = predict_survivor_regions_evac_time();
331 const size_t pending_cards = _analytics->predict_pending_cards();
332 const size_t scanned_cards = _analytics->predict_card_num(rs_length, true /* for_young_gc */);
333 const double base_time_ms =
334 predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
335 survivor_regions_evac_time;
336 const uint available_free_regions = _free_regions_at_end_of_collection;
337 const uint base_free_regions =
338 available_free_regions > _reserve_regions ? available_free_regions - _reserve_regions : 0;
339
340 // Here, we will make sure that the shortest young length that
341 // makes sense fits within the target pause time.
342
343 G1YoungLengthPredictor p(collector_state()->mark_or_rebuild_in_progress(),
344 base_time_ms,
345 base_free_regions,
346 target_pause_time_ms,
347 this);
348 if (p.will_fit(min_young_length)) {
349 // The shortest young length will fit into the target pause time;
350 // we'll now check whether the absolute maximum number of young
351 // regions will fit in the target pause time. If not, we'll do
352 // a binary search between min_young_length and max_young_length.
353 if (p.will_fit(max_young_length)) {
354 // The maximum young length will fit into the target pause time.
696 if (collector_state()->in_young_gc_before_mixed()) {
697 assert(!this_pause_included_initial_mark, "The young GC before mixed is not allowed to be an initial mark GC");
698 // This has been the young GC before we start doing mixed GCs. We already
699 // decided to start mixed GCs much earlier, so there is nothing to do except
700 // advancing the state.
701 collector_state()->set_in_young_only_phase(false);
702 collector_state()->set_in_young_gc_before_mixed(false);
703 } else if (!this_pause_was_young_only) {
704 // This is a mixed GC. Here we decide whether to continue doing more
705 // mixed GCs or not.
706 if (!next_gc_should_be_mixed("continue mixed GCs",
707 "do not continue mixed GCs")) {
708 collector_state()->set_in_young_only_phase(true);
709
710 clear_collection_set_candidates();
711 maybe_start_marking();
712 }
713 }
714
715 _short_lived_surv_rate_group->start_adding_regions();
716 // Do that for any other surv rate groups
717
718 double scan_hcc_time_ms = G1HotCardCache::default_use_cache() ? average_time_ms(G1GCPhaseTimes::MergeHCC) : 0.0;
719
720 if (update_stats) {
721 double cost_per_logged_card = 0.0;
722 size_t const pending_logged_cards = p->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards);
723 if (pending_logged_cards > 0) {
724 cost_per_logged_card = logged_cards_processing_time() / pending_logged_cards;
725 _analytics->report_cost_per_logged_card_ms(cost_per_logged_card);
726 }
727 _analytics->report_cost_scan_hcc(scan_hcc_time_ms);
728
729 size_t const total_cards_scanned = p->sum_thread_work_items(G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::ScanHRScannedCards) +
730 p->sum_thread_work_items(G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::ScanHRScannedCards);
731 size_t remset_cards_scanned = 0;
732 // There might have been duplicate log buffer entries in the queues which could
733 // increase this value beyond the cards scanned. In this case attribute all cards
734 // to the log buffers.
735 if (pending_logged_cards <= total_cards_scanned) {
736 remset_cards_scanned = total_cards_scanned - pending_logged_cards;
737 }
738
739 double cost_per_remset_card_ms = 0.0;
740 if (remset_cards_scanned > 10) {
741 double avg_time_remset_scan = ((average_time_ms(G1GCPhaseTimes::ScanHR) + average_time_ms(G1GCPhaseTimes::OptScanHR)) *
742 remset_cards_scanned / total_cards_scanned) +
743 average_time_ms(G1GCPhaseTimes::MergeER) +
744 average_time_ms(G1GCPhaseTimes::MergeRS) +
745 average_time_ms(G1GCPhaseTimes::OptMergeRS);
746
747 cost_per_remset_card_ms = avg_time_remset_scan / remset_cards_scanned;
748 _analytics->report_cost_per_remset_card_ms(cost_per_remset_card_ms, this_pause_was_young_only);
749 }
750
751 if (_rs_length > 0) {
752 double cards_per_entry_ratio =
753 (double) remset_cards_scanned / (double) _rs_length;
754 _analytics->report_cards_per_entry_ratio(cards_per_entry_ratio, this_pause_was_young_only);
755 }
756
757 // This is defensive. For a while _rs_length could get
758 // smaller than _recorded_rs_length which was causing
759 // rs_length_diff to get very large and mess up the RSet length
760 // predictions. The reason was unsafe concurrent updates to the
761 // _inc_cset_recorded_rs_length field which the code below guards
762 // against (see CR 7118202). This bug has now been fixed (see CR
763 // 7119027). However, I'm still worried that
764 // _inc_cset_recorded_rs_length might still end up somewhat
765 // inaccurate. The concurrent refinement thread calculates an
766 // RSet's length concurrently with other CR threads updating it
767 // which might cause it to calculate the length incorrectly (if,
768 // say, it's in mid-coarsening). So I'll leave in the defensive
769 // conditional below just in case.
770 size_t rs_length_diff = 0;
771 size_t recorded_rs_length = _collection_set->recorded_rs_length();
772 if (_rs_length > recorded_rs_length) {
773 rs_length_diff = _rs_length - recorded_rs_length;
774 }
775 _analytics->report_rs_length_diff((double) rs_length_diff);
776
777 size_t copied_bytes = p->sum_thread_work_items(G1GCPhaseTimes::MergePSS, G1GCPhaseTimes::MergePSSCopiedBytes);
778
779 if (copied_bytes > 0) {
780 double cost_per_byte_ms = (average_time_ms(G1GCPhaseTimes::ObjCopy) + average_time_ms(G1GCPhaseTimes::OptObjCopy)) / copied_bytes;
781 _analytics->report_cost_per_byte_ms(cost_per_byte_ms, collector_state()->mark_or_rebuild_in_progress());
782 }
783
784 if (_collection_set->young_region_length() > 0) {
785 _analytics->report_young_other_cost_per_region_ms(young_other_time_ms() /
786 _collection_set->young_region_length());
787 }
788
789 if (_collection_set->old_region_length() > 0) {
790 _analytics->report_non_young_other_cost_per_region_ms(non_young_other_time_ms() /
791 _collection_set->old_region_length());
792 }
793
794 _analytics->report_constant_other_time_ms(constant_other_time_ms(pause_time_ms));
795
796 // Do not update RS lengths and the number of pending cards with information from mixed gc:
825 update_ihop_prediction(app_time_ms / 1000.0,
826 _bytes_allocated_in_old_since_last_gc,
827 last_unrestrained_young_length * HeapRegion::GrainBytes,
828 this_pause_was_young_only);
829 _bytes_allocated_in_old_since_last_gc = 0;
830
831 _ihop_control->send_trace_event(_g1h->gc_tracer_stw());
832 } else {
833 // Any garbage collection triggered as periodic collection resets the time-to-mixed
834 // measurement. Periodic collection typically means that the application is "inactive", i.e.
835 // the marking threads may have received an uncharacteristic amount of cpu time
836 // for completing the marking, i.e. are faster than expected.
837 // This skews the predicted marking length towards smaller values which might cause
838 // the mark start being too late.
839 _initial_mark_to_mixed.reset();
840 }
841
842 // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
843 double scan_logged_cards_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
844
845 if (scan_logged_cards_time_goal_ms < scan_hcc_time_ms) {
846 log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal)."
847 "Logged Cards Scan time goal: %1.2fms Scan HCC time: %1.2fms",
848 scan_logged_cards_time_goal_ms, scan_hcc_time_ms);
849
850 scan_logged_cards_time_goal_ms = 0;
851 } else {
852 scan_logged_cards_time_goal_ms -= scan_hcc_time_ms;
853 }
854
855 _pending_cards_at_prev_gc_end = _g1h->pending_card_num();
856 double const logged_cards_time = logged_cards_processing_time();
857
858 log_debug(gc, ergo, refine)("Concurrent refinement times: Logged Cards Scan time goal: %1.2fms Logged Cards Scan time: %1.2fms HCC time: %1.2fms",
859 scan_logged_cards_time_goal_ms, logged_cards_time, scan_hcc_time_ms);
860
861 _g1h->concurrent_refine()->adjust(logged_cards_time,
862 phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards),
863 scan_logged_cards_time_goal_ms);
864 }
865
866 G1IHOPControl* G1Policy::create_ihop_control(const G1Predictions* predictor){
867 if (G1UseAdaptiveIHOP) {
868 return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
869 predictor,
870 G1ReservePercent,
871 G1HeapWastePercent);
872 } else {
873 return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent);
874 }
875 }
876
877 void G1Policy::update_ihop_prediction(double mutator_time_s,
878 size_t mutator_alloc_bytes,
879 size_t young_gen_size,
919
// Print the per-phase timing breakdown of the most recent GC pause.
void G1Policy::print_phases() {
  phase_times()->print();
}
923
924 double G1Policy::predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const {
925 TruncatedSeq* seq = surv_rate_group->get_seq(age);
926 guarantee(seq->num() > 0, "There should be some young gen survivor samples available. Tried to access with age %d", age);
927 double pred = _predictor.get_new_prediction(seq);
928 if (pred > 1.0) {
929 pred = 1.0;
930 }
931 return pred;
932 }
933
// Accumulated predicted survivor rate of the short-lived survivor rate group
// up to and including the given age.
double G1Policy::accum_yg_surv_rate_pred(int age) const {
  return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
}
937
938 double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards,
939 size_t scanned_cards) const {
940 return
941 _analytics->predict_rs_update_time_ms(pending_cards) +
942 _analytics->predict_rs_scan_time_ms(scanned_cards, collector_state()->in_young_only_phase()) +
943 _analytics->predict_constant_other_time_ms();
944 }
945
946 double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards) const {
947 size_t rs_length = _analytics->predict_rs_length();
948 size_t card_num = _analytics->predict_card_num(rs_length, collector_state()->in_young_only_phase());
949 return predict_base_elapsed_time_ms(pending_cards, card_num);
950 }
951
952 size_t G1Policy::predict_bytes_to_copy(HeapRegion* hr) const {
953 size_t bytes_to_copy;
954 if (!hr->is_young()) {
955 bytes_to_copy = hr->max_live_bytes();
956 } else {
957 assert(hr->age_in_surv_rate_group() != -1, "invariant");
958 int age = hr->age_in_surv_rate_group();
959 double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
960 bytes_to_copy = (size_t) (hr->used() * yg_surv_rate);
961 }
962 return bytes_to_copy;
963 }
964
965 double G1Policy::predict_region_elapsed_time_ms(HeapRegion* hr,
966 bool for_young_gc) const {
967 size_t rs_length = hr->rem_set()->occupied();
968 // Predicting the number of cards is based on which type of GC
969 // we're predicting for.
970 size_t card_num = _analytics->predict_card_num(rs_length, for_young_gc);
971 size_t bytes_to_copy = predict_bytes_to_copy(hr);
972
973 double region_elapsed_time_ms =
974 _analytics->predict_rs_scan_time_ms(card_num, collector_state()->in_young_only_phase()) +
975 _analytics->predict_object_copy_time_ms(bytes_to_copy, collector_state()->mark_or_rebuild_in_progress());
976
977 // The prediction of the "other" time for this region is based
978 // upon the region type and NOT the GC type.
979 if (hr->is_young()) {
980 region_elapsed_time_ms += _analytics->predict_young_other_time_ms(1);
981 } else {
982 region_elapsed_time_ms += _analytics->predict_non_young_other_time_ms(1);
983 }
984 return region_elapsed_time_ms;
985 }
986
987 bool G1Policy::should_allocate_mutator_region() const {
988 uint young_list_length = _g1h->young_regions_count();
989 uint young_list_target_length = _young_list_target_length;
990 return young_list_length < young_list_target_length;
991 }
992
993 bool G1Policy::can_expand_young_list() const {
994 uint young_list_length = _g1h->young_regions_count();
|
312 assert(collector_state()->in_young_only_phase(), "only call this for young GCs");
313
314 // In case some edge-condition makes the desired max length too small...
315 if (desired_max_length <= desired_min_length) {
316 return desired_min_length;
317 }
318
319 // We'll adjust min_young_length and max_young_length not to include
320 // the already allocated young regions (i.e., so they reflect the
321 // min and max eden regions we'll allocate). The base_min_length
322 // will be reflected in the predictions by the
323 // survivor_regions_evac_time prediction.
324 assert(desired_min_length > base_min_length, "invariant");
325 uint min_young_length = desired_min_length - base_min_length;
326 assert(desired_max_length > base_min_length, "invariant");
327 uint max_young_length = desired_max_length - base_min_length;
328
329 const double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
330 const double survivor_regions_evac_time = predict_survivor_regions_evac_time();
331 const size_t pending_cards = _analytics->predict_pending_cards();
332 const double base_time_ms =
333 predict_base_elapsed_time_ms(pending_cards, rs_length) +
334 survivor_regions_evac_time;
335 const uint available_free_regions = _free_regions_at_end_of_collection;
336 const uint base_free_regions =
337 available_free_regions > _reserve_regions ? available_free_regions - _reserve_regions : 0;
338
339 // Here, we will make sure that the shortest young length that
340 // makes sense fits within the target pause time.
341
342 G1YoungLengthPredictor p(collector_state()->mark_or_rebuild_in_progress(),
343 base_time_ms,
344 base_free_regions,
345 target_pause_time_ms,
346 this);
347 if (p.will_fit(min_young_length)) {
348 // The shortest young length will fit into the target pause time;
349 // we'll now check whether the absolute maximum number of young
350 // regions will fit in the target pause time. If not, we'll do
351 // a binary search between min_young_length and max_young_length.
352 if (p.will_fit(max_young_length)) {
353 // The maximum young length will fit into the target pause time.
695 if (collector_state()->in_young_gc_before_mixed()) {
696 assert(!this_pause_included_initial_mark, "The young GC before mixed is not allowed to be an initial mark GC");
697 // This has been the young GC before we start doing mixed GCs. We already
698 // decided to start mixed GCs much earlier, so there is nothing to do except
699 // advancing the state.
700 collector_state()->set_in_young_only_phase(false);
701 collector_state()->set_in_young_gc_before_mixed(false);
702 } else if (!this_pause_was_young_only) {
703 // This is a mixed GC. Here we decide whether to continue doing more
704 // mixed GCs or not.
705 if (!next_gc_should_be_mixed("continue mixed GCs",
706 "do not continue mixed GCs")) {
707 collector_state()->set_in_young_only_phase(true);
708
709 clear_collection_set_candidates();
710 maybe_start_marking();
711 }
712 }
713
714 _short_lived_surv_rate_group->start_adding_regions();
715
716 double merge_hcc_time_ms = average_time_ms(G1GCPhaseTimes::MergeHCC);
717 if (update_stats) {
718 size_t const total_log_buffer_cards = p->sum_thread_work_items(G1GCPhaseTimes::MergeHCC, G1GCPhaseTimes::MergeHCCDirtyCards) +
719 p->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards);
720 // Update prediction for card merge; MergeRSDirtyCards includes the cards from the Eager Reclaim phase.
721 size_t const total_cards_merged = p->sum_thread_work_items(G1GCPhaseTimes::MergeRS, G1GCPhaseTimes::MergeRSDirtyCards) +
722 p->sum_thread_work_items(G1GCPhaseTimes::OptMergeRS, G1GCPhaseTimes::MergeRSDirtyCards) +
723 total_log_buffer_cards;
724
725 // The threshold for the number of cards in a given sampling which we consider
726 // large enough so that the impact from setup and other costs is negligible.
727 size_t const CardsNumSamplingThreshold = 10;
728
729 if (total_cards_merged > CardsNumSamplingThreshold) {
730 double avg_time_merge_cards = average_time_ms(G1GCPhaseTimes::MergeER) +
731 average_time_ms(G1GCPhaseTimes::MergeRS) +
732 average_time_ms(G1GCPhaseTimes::MergeHCC) +
733 average_time_ms(G1GCPhaseTimes::MergeLB) +
734 average_time_ms(G1GCPhaseTimes::OptMergeRS);
735 _analytics->report_cost_per_card_merge_ms(avg_time_merge_cards / total_cards_merged, this_pause_was_young_only);
736 }
737
738 // Update prediction for card scan
739 size_t const total_cards_scanned = p->sum_thread_work_items(G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::ScanHRScannedCards) +
740 p->sum_thread_work_items(G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::ScanHRScannedCards);
741
742 if (total_cards_scanned > CardsNumSamplingThreshold) {
743 double avg_time_dirty_card_scan = average_time_ms(G1GCPhaseTimes::ScanHR) +
744 average_time_ms(G1GCPhaseTimes::OptScanHR);
745
746 _analytics->report_cost_per_card_scan_ms(avg_time_dirty_card_scan / total_cards_scanned, this_pause_was_young_only);
747 }
748
749 // Update prediction for the ratio between cards from the remembered
750 // sets and actually scanned cards from the remembered sets.
751 // Cards from the remembered sets are all cards not duplicated by cards from
752 // the logs.
753 // Due to duplicates in the log buffers, the number of actually scanned cards
754 // can be smaller than the cards in the log buffers.
755 const size_t from_rs_length_cards = (total_cards_scanned > total_log_buffer_cards) ? total_cards_scanned - total_log_buffer_cards : 0;
756 double merge_to_scan_ratio = 0.0;
757 if (total_cards_scanned > 0) {
758 merge_to_scan_ratio = (double) from_rs_length_cards / total_cards_scanned;
759 }
760 _analytics->report_card_merge_to_scan_ratio(merge_to_scan_ratio, this_pause_was_young_only);
761
762 const size_t recorded_rs_length = _collection_set->recorded_rs_length();
763 const size_t rs_length_diff = _rs_length > recorded_rs_length ? _rs_length - recorded_rs_length : 0;
764 _analytics->report_rs_length_diff(rs_length_diff);
765
766 // Update prediction for copy cost per byte
767 size_t copied_bytes = p->sum_thread_work_items(G1GCPhaseTimes::MergePSS, G1GCPhaseTimes::MergePSSCopiedBytes);
768
769 if (copied_bytes > 0) {
770 double cost_per_byte_ms = (average_time_ms(G1GCPhaseTimes::ObjCopy) + average_time_ms(G1GCPhaseTimes::OptObjCopy)) / copied_bytes;
771 _analytics->report_cost_per_byte_ms(cost_per_byte_ms, collector_state()->mark_or_rebuild_in_progress());
772 }
773
774 if (_collection_set->young_region_length() > 0) {
775 _analytics->report_young_other_cost_per_region_ms(young_other_time_ms() /
776 _collection_set->young_region_length());
777 }
778
779 if (_collection_set->old_region_length() > 0) {
780 _analytics->report_non_young_other_cost_per_region_ms(non_young_other_time_ms() /
781 _collection_set->old_region_length());
782 }
783
784 _analytics->report_constant_other_time_ms(constant_other_time_ms(pause_time_ms));
785
786 // Do not update RS lengths and the number of pending cards with information from mixed gc:
815 update_ihop_prediction(app_time_ms / 1000.0,
816 _bytes_allocated_in_old_since_last_gc,
817 last_unrestrained_young_length * HeapRegion::GrainBytes,
818 this_pause_was_young_only);
819 _bytes_allocated_in_old_since_last_gc = 0;
820
821 _ihop_control->send_trace_event(_g1h->gc_tracer_stw());
822 } else {
823 // Any garbage collection triggered as periodic collection resets the time-to-mixed
824 // measurement. Periodic collection typically means that the application is "inactive", i.e.
825 // the marking threads may have received an uncharacteristic amount of cpu time
826 // for completing the marking, i.e. are faster than expected.
827 // This skews the predicted marking length towards smaller values which might cause
828 // the mark start being too late.
829 _initial_mark_to_mixed.reset();
830 }
831
832 // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
833 double scan_logged_cards_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
834
835 if (scan_logged_cards_time_goal_ms < merge_hcc_time_ms) {
836 log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal)."
837 "Logged Cards Scan time goal: %1.2fms Scan HCC time: %1.2fms",
838 scan_logged_cards_time_goal_ms, merge_hcc_time_ms);
839
840 scan_logged_cards_time_goal_ms = 0;
841 } else {
842 scan_logged_cards_time_goal_ms -= merge_hcc_time_ms;
843 }
844
845 _pending_cards_at_prev_gc_end = _g1h->pending_card_num();
846 double const logged_cards_time = logged_cards_processing_time();
847
848 log_debug(gc, ergo, refine)("Concurrent refinement times: Logged Cards Scan time goal: %1.2fms Logged Cards Scan time: %1.2fms HCC time: %1.2fms",
849 scan_logged_cards_time_goal_ms, logged_cards_time, merge_hcc_time_ms);
850
851 _g1h->concurrent_refine()->adjust(logged_cards_time,
852 phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards),
853 scan_logged_cards_time_goal_ms);
854 }
855
856 G1IHOPControl* G1Policy::create_ihop_control(const G1Predictions* predictor){
857 if (G1UseAdaptiveIHOP) {
858 return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
859 predictor,
860 G1ReservePercent,
861 G1HeapWastePercent);
862 } else {
863 return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent);
864 }
865 }
866
867 void G1Policy::update_ihop_prediction(double mutator_time_s,
868 size_t mutator_alloc_bytes,
869 size_t young_gen_size,
909
// Print the per-phase timing breakdown of the most recent GC pause.
void G1Policy::print_phases() {
  phase_times()->print();
}
913
914 double G1Policy::predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const {
915 TruncatedSeq* seq = surv_rate_group->get_seq(age);
916 guarantee(seq->num() > 0, "There should be some young gen survivor samples available. Tried to access with age %d", age);
917 double pred = _predictor.get_new_prediction(seq);
918 if (pred > 1.0) {
919 pred = 1.0;
920 }
921 return pred;
922 }
923
// Accumulated predicted survivor rate of the short-lived survivor rate group
// up to and including the given age.
double G1Policy::accum_yg_surv_rate_pred(int age) const {
  return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
}
927
928 double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards,
929 size_t rs_length) const {
930 size_t effective_scanned_cards = _analytics->predict_scan_card_num(rs_length, collector_state()->in_young_only_phase());
931 return
932 _analytics->predict_card_merge_time_ms(pending_cards + rs_length, collector_state()->in_young_only_phase()) +
933 _analytics->predict_card_scan_time_ms(effective_scanned_cards, collector_state()->in_young_only_phase()) +
934 _analytics->predict_constant_other_time_ms();
935 }
936
937 double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards) const {
938 size_t rs_length = _analytics->predict_rs_length();
939 return predict_base_elapsed_time_ms(pending_cards, rs_length);
940 }
941
942 size_t G1Policy::predict_bytes_to_copy(HeapRegion* hr) const {
943 size_t bytes_to_copy;
944 if (!hr->is_young()) {
945 bytes_to_copy = hr->max_live_bytes();
946 } else {
947 assert(hr->age_in_surv_rate_group() != -1, "invariant");
948 int age = hr->age_in_surv_rate_group();
949 double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
950 bytes_to_copy = (size_t) (hr->used() * yg_surv_rate);
951 }
952 return bytes_to_copy;
953 }
954
955 double G1Policy::predict_region_elapsed_time_ms(HeapRegion* hr,
956 bool for_young_gc) const {
957 size_t rs_length = hr->rem_set()->occupied();
958 size_t scan_card_num = _analytics->predict_scan_card_num(rs_length, for_young_gc);
959
960 size_t bytes_to_copy = predict_bytes_to_copy(hr);
961
962 double region_elapsed_time_ms =
963 _analytics->predict_card_merge_time_ms(rs_length, collector_state()->in_young_only_phase()) +
964 _analytics->predict_card_scan_time_ms(scan_card_num, collector_state()->in_young_only_phase()) +
965 _analytics->predict_object_copy_time_ms(bytes_to_copy, collector_state()->mark_or_rebuild_in_progress());
966
967 // The prediction of the "other" time for this region is based
968 // upon the region type and NOT the GC type.
969 if (hr->is_young()) {
970 region_elapsed_time_ms += _analytics->predict_young_other_time_ms(1);
971 } else {
972 region_elapsed_time_ms += _analytics->predict_non_young_other_time_ms(1);
973 }
974 return region_elapsed_time_ms;
975 }
976
977 bool G1Policy::should_allocate_mutator_region() const {
978 uint young_list_length = _g1h->young_regions_count();
979 uint young_list_target_length = _young_list_target_length;
980 return young_list_length < young_list_target_length;
981 }
982
983 bool G1Policy::can_expand_young_list() const {
984 uint young_list_length = _g1h->young_regions_count();
|