887
// NOTE(review): this span is the tail of a larger pause-end routine whose
// declaration and locals (app_time_ms, scan_hcc_time_ms,
// last_unrestrained_young_length) are defined above this excerpt. The
// numeric prefix on each line is an extraction artifact, not code.
// Feed the just-finished mutator interval into the IHOP predictor;
// app_time_ms is converted from milliseconds to seconds here.
888 update_ihop_prediction(app_time_ms / 1000.0,
889 _bytes_allocated_in_old_since_last_gc,
890 last_unrestrained_young_length * HeapRegion::GrainBytes);
// The old-gen allocation counter has been consumed by the prediction above;
// restart accumulation for the next mutator interval.
891 _bytes_allocated_in_old_since_last_gc = 0;
892
893 _ihop_control->send_trace_event(_g1->gc_tracer_stw());
894
895 // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
// Budget for Update RS: the configured percentage of the max GC pause time.
896 double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
897
// If scanning the hot card cache (HCC) alone already exceeds the Update RS
// goal, no budget remains for concurrent refinement; clamp the goal to zero.
// Otherwise the HCC scan time is carved out of the goal.
898 if (update_rs_time_goal_ms < scan_hcc_time_ms) {
899 log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal)."
900 "Update RS time goal: %1.2fms Scan HCC time: %1.2fms",
901 update_rs_time_goal_ms, scan_hcc_time_ms);
902
903 update_rs_time_goal_ms = 0;
904 } else {
905 update_rs_time_goal_ms -= scan_hcc_time_ms;
906 }
// Retune refinement thresholds from the observed Update RS time (minus the
// HCC portion), the number of buffers processed, and the remaining goal.
907 adjust_concurrent_refinement(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
908 phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
909 update_rs_time_goal_ms);
910
911 cset_chooser()->verify();
912 }
913
914 G1IHOPControl* G1CollectorPolicy::create_ihop_control() const {
915 if (G1UseAdaptiveIHOP) {
916 return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
917 &_predictor,
918 G1ReservePercent,
919 G1HeapWastePercent);
920 } else {
921 return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent);
922 }
923 }
924
// Record one mutator interval (duration, old-gen allocation, young gen size)
// with the IHOP controller, then optionally report statistics.
// NOTE(review): original lines 928-948 are missing from this excerpt; the
// declarations of `report` and `min_valid_time` used below are among them.
// Do not restructure this method without the full body.
925 void G1CollectorPolicy::update_ihop_prediction(double mutator_time_s,
926 size_t mutator_alloc_bytes,
927 size_t young_gen_size) {
949 // As an approximation for the young gc promotion rates during marking we use
950 // all of them. In many applications there are only a few if any young gcs during
951 // marking, which makes any prediction useless. This increases the accuracy of the
952 // prediction.
// Only intervals ending in a young GC and longer than min_valid_time are
// considered useful samples for the allocation-rate prediction.
953 if (collector_state()->last_gc_was_young() && mutator_time_s > min_valid_time) {
954 _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size);
955 report = true;
956 }
957
958 if (report) {
959 report_ihop_statistics();
960 }
961 }
962
963 void G1CollectorPolicy::report_ihop_statistics() {
964 _ihop_control->print();
965 }
966
967 void G1CollectorPolicy::print_phases() {
968 phase_times()->print();
969 }
970
// Retune the concurrent refinement machinery after a pause based on how the
// observed Update RS time (update_rs_time, ms) compared to its budget
// (goal_ms), given how many buffers the workers processed.
// The numeric prefix on each line is an extraction artifact, not code.
971 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
972 double update_rs_processed_buffers,
973 double goal_ms) {
974 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
975 ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
976
977 if (G1UseAdaptiveConcRefinement) {
// Zone multipliers (yellow = 3x green, red = 6x green) and the 10% step
// sizes used to grow/shrink the green zone.
978 const int k_gy = 3, k_gr = 6;
979 const double inc_k = 1.1, dec_k = 0.9;
980
// Shrink the green zone when Update RS overran its goal; grow it (by at
// least one buffer) when it underran AND the workers actually processed
// more buffers than the current green zone allows.
981 size_t g = cg1r->green_zone();
982 if (update_rs_time > goal_ms) {
983 g = (size_t)(g * dec_k); // Can become 0, that's OK. That would mean a mutator-only processing.
984 } else {
985 if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
986 g = (size_t)MAX2(g * inc_k, g + 1.0);
987 }
988 }
989 // Change the refinement threads params
// Yellow and red zones are derived from the new green zone; the worker
// threads must be reinitialized to pick up the new thresholds.
990 cg1r->set_green_zone(g);
991 cg1r->set_yellow_zone(g * k_gy);
992 cg1r->set_red_zone(g * k_gr);
993 cg1r->reinitialize_threads();
994
// Mutator help kicks in once the completed-buffer count exceeds the green
// zone plus a sigma-scaled safety margin (at least 1), capped at yellow.
995 size_t processing_threshold_delta = MAX2<size_t>(cg1r->green_zone() * _predictor.sigma(), 1);
996 size_t processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
997 cg1r->yellow_zone());
998 // Change the barrier params
999 dcqs.set_process_completed_threshold((int)processing_threshold);
1000 dcqs.set_max_completed_queue((int)cg1r->red_zone());
1001 }
1002
// While the completed-buffer queue is at or above the yellow zone, record
// its current size as padding; the padding is cleared once it drains below.
1003 size_t curr_queue_size = dcqs.completed_buffers_num();
1004 if (curr_queue_size >= cg1r->yellow_zone()) {
1005 dcqs.set_completed_queue_padding(curr_queue_size);
1006 } else {
1007 dcqs.set_completed_queue_padding(0);
1008 }
1009 dcqs.notify_if_necessary();
1010 }
1011
1012 double G1CollectorPolicy::predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const {
1013 TruncatedSeq* seq = surv_rate_group->get_seq(age);
1014 guarantee(seq->num() > 0, "There should be some young gen survivor samples available. Tried to access with age %d", age);
1015 double pred = _predictor.get_new_prediction(seq);
1016 if (pred > 1.0) {
1017 pred = 1.0;
1018 }
1019 return pred;
1020 }
1021
1022 double G1CollectorPolicy::predict_yg_surv_rate(int age) const {
1023 return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
1024 }
1025
1026 double G1CollectorPolicy::accum_yg_surv_rate_pred(int age) const {
1027 return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
1028 }
1029
|
887
// NOTE(review): this span is the tail of a larger pause-end routine whose
// declaration and locals (app_time_ms, scan_hcc_time_ms,
// last_unrestrained_young_length) are defined above this excerpt. The
// numeric prefix on each line is an extraction artifact, not code.
// Feed the just-finished mutator interval into the IHOP predictor;
// app_time_ms is converted from milliseconds to seconds here.
888 update_ihop_prediction(app_time_ms / 1000.0,
889 _bytes_allocated_in_old_since_last_gc,
890 last_unrestrained_young_length * HeapRegion::GrainBytes);
// The old-gen allocation counter has been consumed by the prediction above;
// restart accumulation for the next mutator interval.
891 _bytes_allocated_in_old_since_last_gc = 0;
892
893 _ihop_control->send_trace_event(_g1->gc_tracer_stw());
894
895 // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
// Budget for Update RS: the configured percentage of the max GC pause time.
896 double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
897
// If scanning the hot card cache (HCC) alone already exceeds the Update RS
// goal, no budget remains for concurrent refinement; clamp the goal to zero.
// Otherwise the HCC scan time is carved out of the goal.
898 if (update_rs_time_goal_ms < scan_hcc_time_ms) {
899 log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal)."
900 "Update RS time goal: %1.2fms Scan HCC time: %1.2fms",
901 update_rs_time_goal_ms, scan_hcc_time_ms);
902
903 update_rs_time_goal_ms = 0;
904 } else {
905 update_rs_time_goal_ms -= scan_hcc_time_ms;
906 }
// In this version of the code the refinement retuning lives on
// ConcurrentG1Refine itself (adjust), rather than on the policy.
907 _g1->concurrent_g1_refine()->adjust(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
908 phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
909 update_rs_time_goal_ms);
910
911 cset_chooser()->verify();
912 }
913
914 G1IHOPControl* G1CollectorPolicy::create_ihop_control() const {
915 if (G1UseAdaptiveIHOP) {
916 return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
917 &_predictor,
918 G1ReservePercent,
919 G1HeapWastePercent);
920 } else {
921 return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent);
922 }
923 }
924
// Record one mutator interval (duration, old-gen allocation, young gen size)
// with the IHOP controller, then optionally report statistics.
// NOTE(review): original lines 928-948 are missing from this excerpt; the
// declarations of `report` and `min_valid_time` used below are among them.
// Do not restructure this method without the full body.
925 void G1CollectorPolicy::update_ihop_prediction(double mutator_time_s,
926 size_t mutator_alloc_bytes,
927 size_t young_gen_size) {
949 // As an approximation for the young gc promotion rates during marking we use
950 // all of them. In many applications there are only a few if any young gcs during
951 // marking, which makes any prediction useless. This increases the accuracy of the
952 // prediction.
// Only intervals ending in a young GC and longer than min_valid_time are
// considered useful samples for the allocation-rate prediction.
953 if (collector_state()->last_gc_was_young() && mutator_time_s > min_valid_time) {
954 _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size);
955 report = true;
956 }
957
958 if (report) {
959 report_ihop_statistics();
960 }
961 }
962
963 void G1CollectorPolicy::report_ihop_statistics() {
964 _ihop_control->print();
965 }
966
967 void G1CollectorPolicy::print_phases() {
968 phase_times()->print();
969 }
970
971 double G1CollectorPolicy::predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const {
972 TruncatedSeq* seq = surv_rate_group->get_seq(age);
973 guarantee(seq->num() > 0, "There should be some young gen survivor samples available. Tried to access with age %d", age);
974 double pred = _predictor.get_new_prediction(seq);
975 if (pred > 1.0) {
976 pred = 1.0;
977 }
978 return pred;
979 }
980
981 double G1CollectorPolicy::predict_yg_surv_rate(int age) const {
982 return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
983 }
984
985 double G1CollectorPolicy::accum_yg_surv_rate_pred(int age) const {
986 return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
987 }
988
|