56 _predictor(G1ConfidencePercent / 100.0),
57 _analytics(new G1Analytics(&_predictor)),
58 _remset_tracker(),
59 _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
60 _ihop_control(create_ihop_control(&_predictor)),
61 _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
62 _full_collection_start_sec(0.0),
63 _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC),
64 _young_list_target_length(0),
65 _young_list_fixed_length(0),
66 _young_list_max_length(0),
67 _eden_surv_rate_group(new G1SurvRateGroup()),
68 _survivor_surv_rate_group(new G1SurvRateGroup()),
69 _reserve_factor((double) G1ReservePercent / 100.0),
70 _reserve_regions(0),
71 _young_gen_sizer(G1YoungGenSizer::create_gen_sizer()),
72 _free_regions_at_end_of_collection(0),
73 _rs_length(0),
74 _rs_length_prediction(0),
75 _pending_cards_at_gc_start(0),
76 _bytes_allocated_in_old_since_last_gc(0),
77 _initial_mark_to_mixed(),
78 _collection_set(NULL),
79 _g1h(NULL),
80 _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
81 _mark_remark_start_sec(0),
82 _mark_cleanup_start_sec(0),
83 _tenuring_threshold(MaxTenuringThreshold),
84 _max_survivor_regions(0),
85 _survivors_age_table(true)
86 {
87 }
88
89 G1Policy::~G1Policy() {
90 delete _ihop_control;
91 delete _young_gen_sizer;
92 }
93
94 G1Policy* G1Policy::create_policy(STWGCTimer* gc_timer_stw) {
95 if (G1Arguments::is_heterogeneous_heap()) {
96 return new G1HeterogeneousHeapPolicy(gc_timer_stw);
443
444 collector_state()->set_in_full_gc(false);
445
446 // "Nuke" the heuristics that control the young/mixed GC
447 // transitions and make sure we start with young GCs after the Full GC.
448 collector_state()->set_in_young_only_phase(true);
449 collector_state()->set_in_young_gc_before_mixed(false);
450 collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
451 collector_state()->set_in_initial_mark_gc(false);
452 collector_state()->set_mark_or_rebuild_in_progress(false);
453 collector_state()->set_clearing_next_bitmap(false);
454
455 _eden_surv_rate_group->start_adding_regions();
456 // also call this on any additional surv rate groups
457
458 _free_regions_at_end_of_collection = _g1h->num_free_regions();
459 _survivor_surv_rate_group->reset();
460 update_young_list_max_and_target_length();
461 update_rs_length_prediction();
462
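// This counter is what update_ihop_prediction() consumes further down; presumably
// it is zeroed here so that old-gen allocation recorded before the Full GC does not
// leak into the next measurement interval.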
463 _bytes_allocated_in_old_since_last_gc = 0;
464
465 record_pause(FullGC, _full_collection_start_sec, end_sec);
466 }
467
468 static void log_refinement_stats(const char* kind, const G1ConcurrentRefineStats& stats) {
469 log_debug(gc, refine, stats)
470 ("%s refinement: %.2fms, refined: " SIZE_FORMAT
471 ", precleaned: " SIZE_FORMAT ", dirtied: " SIZE_FORMAT,
472 kind,
473 stats.refinement_time().seconds() * MILLIUNITS,
474 stats.refined_cards(),
475 stats.precleaned_cards(),
476 stats.dirtied_cards());
477 }
478
479 void G1Policy::record_concurrent_refinement_stats() {
480 G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
481 _pending_cards_at_gc_start = dcqs.num_cards();
482
483 // Collect per-thread stats, mostly from mutator activity.
778
779 assert(!(this_pause_included_initial_mark && collector_state()->mark_or_rebuild_in_progress()),
780 "If the last pause has been an initial mark, we should not have been in the marking window");
781 if (this_pause_included_initial_mark) {
782 collector_state()->set_mark_or_rebuild_in_progress(true);
783 }
784
785 _free_regions_at_end_of_collection = _g1h->num_free_regions();
786
787 update_rs_length_prediction();
788
789 // Do not update dynamic IHOP due to G1 periodic collection as it is highly likely
790 // that in this case we are not running in a "normal" operating mode.
791 if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
792 // IHOP control wants to know the expected young gen length if it were not
793 // restrained by the heap reserve. Using the actual length would make the
794 // prediction too small and then limit the young gen every time we get to the
795 // predicted target occupancy.
796 size_t last_unrestrained_young_length = update_young_list_max_and_target_length();
797
798 update_ihop_prediction(app_time_ms / 1000.0,
799 _bytes_allocated_in_old_since_last_gc,
800 last_unrestrained_young_length * HeapRegion::GrainBytes,
801 this_pause_was_young_only);
802 _bytes_allocated_in_old_since_last_gc = 0;
803
804 _ihop_control->send_trace_event(_g1h->gc_tracer_stw());
805 } else {
806 // Any garbage collection triggered as periodic collection resets the time-to-mixed
807 // measurement. Periodic collection typically means that the application is "inactive", i.e.
808 // the marking threads may have received an uncharacteristic amount of CPU time
809 // for completing the marking, i.e. they are faster than expected.
810 // This skews the predicted marking length towards smaller values, which might cause
811 // the mark to start too late.
812 _initial_mark_to_mixed.reset();
813 }
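// Illustration (not from g1Policy.cpp, numbers assumed): the interval duration and
// old-gen byte count handed to update_ihop_prediction() above boil down to an
// old-gen allocation rate. If, say, 64 MB were allocated in old regions over a
// 2.0 s mutator interval, the rate is about 32 MB/s; roughly speaking, adaptive
// IHOP then tries to start concurrent marking early enough that marking, at its
// predicted duration, completes before that rate exhausts the old-gen headroom.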
814
815 // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
816 double scan_logged_cards_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
817
818 if (scan_logged_cards_time_goal_ms < merge_hcc_time_ms) {
819 log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal). "
820 "Logged Cards Scan time goal: %1.2fms Scan HCC time: %1.2fms",
821 scan_logged_cards_time_goal_ms, merge_hcc_time_ms);
822
|
56 _predictor(G1ConfidencePercent / 100.0),
57 _analytics(new G1Analytics(&_predictor)),
58 _remset_tracker(),
59 _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
60 _ihop_control(create_ihop_control(&_predictor)),
61 _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
62 _full_collection_start_sec(0.0),
63 _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC),
64 _young_list_target_length(0),
65 _young_list_fixed_length(0),
66 _young_list_max_length(0),
67 _eden_surv_rate_group(new G1SurvRateGroup()),
68 _survivor_surv_rate_group(new G1SurvRateGroup()),
69 _reserve_factor((double) G1ReservePercent / 100.0),
70 _reserve_regions(0),
71 _young_gen_sizer(G1YoungGenSizer::create_gen_sizer()),
72 _free_regions_at_end_of_collection(0),
73 _rs_length(0),
74 _rs_length_prediction(0),
75 _pending_cards_at_gc_start(0),
76 _old_gen_alloc_tracker(),
77 _initial_mark_to_mixed(),
78 _collection_set(NULL),
79 _g1h(NULL),
80 _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
81 _mark_remark_start_sec(0),
82 _mark_cleanup_start_sec(0),
83 _tenuring_threshold(MaxTenuringThreshold),
84 _max_survivor_regions(0),
85 _survivors_age_table(true)
86 {
87 }
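// Worked example for the conversions in the initializer list above (flag values
// are illustrative defaults, not taken from this change): G1ConfidencePercent = 50
// gives the predictor 0.50, G1ReservePercent = 10 gives a reserve factor of 0.10,
// and MaxGCPauseMillis = 200 with GCPauseIntervalMillis = 201 builds the MMU
// tracker with a 0.200 s pause goal inside a 0.201 s interval.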
88
89 G1Policy::~G1Policy() {
90 delete _ihop_control;
91 delete _young_gen_sizer;
92 }
93
94 G1Policy* G1Policy::create_policy(STWGCTimer* gc_timer_stw) {
95 if (G1Arguments::is_heterogeneous_heap()) {
96 return new G1HeterogeneousHeapPolicy(gc_timer_stw);
443
444 collector_state()->set_in_full_gc(false);
445
446 // "Nuke" the heuristics that control the young/mixed GC
447 // transitions and make sure we start with young GCs after the Full GC.
448 collector_state()->set_in_young_only_phase(true);
449 collector_state()->set_in_young_gc_before_mixed(false);
450 collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
451 collector_state()->set_in_initial_mark_gc(false);
452 collector_state()->set_mark_or_rebuild_in_progress(false);
453 collector_state()->set_clearing_next_bitmap(false);
454
455 _eden_surv_rate_group->start_adding_regions();
456 // also call this on any additional surv rate groups
457
458 _free_regions_at_end_of_collection = _g1h->num_free_regions();
459 _survivor_surv_rate_group->reset();
460 update_young_list_max_and_target_length();
461 update_rs_length_prediction();
462
463 _old_gen_alloc_tracker.reset_after_full_gc();
464
465 record_pause(FullGC, _full_collection_start_sec, end_sec);
466 }
467
468 static void log_refinement_stats(const char* kind, const G1ConcurrentRefineStats& stats) {
469 log_debug(gc, refine, stats)
470 ("%s refinement: %.2fms, refined: " SIZE_FORMAT
471 ", precleaned: " SIZE_FORMAT ", dirtied: " SIZE_FORMAT,
472 kind,
473 stats.refinement_time().seconds() * MILLIUNITS,
474 stats.refined_cards(),
475 stats.precleaned_cards(),
476 stats.dirtied_cards());
477 }
478
479 void G1Policy::record_concurrent_refinement_stats() {
480 G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
481 _pending_cards_at_gc_start = dcqs.num_cards();
482
483 // Collect per-thread stats, mostly from mutator activity.
778
779 assert(!(this_pause_included_initial_mark && collector_state()->mark_or_rebuild_in_progress()),
780 "If the last pause has been an initial mark, we should not have been in the marking window");
781 if (this_pause_included_initial_mark) {
782 collector_state()->set_mark_or_rebuild_in_progress(true);
783 }
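// An initial-mark pause is what starts a concurrent marking cycle, so the
// mark-or-rebuild flag is only raised after such a pause; the assert above
// verifies it was not already set while this pause was running.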
784
785 _free_regions_at_end_of_collection = _g1h->num_free_regions();
786
787 update_rs_length_prediction();
788
789 // Do not update dynamic IHOP due to G1 periodic collection as it is highly likely
790 // that in this case we are not running in a "normal" operating mode.
791 if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
792 // IHOP control wants to know the expected young gen length if it were not
793 // restrained by the heap reserve. Using the actual length would make the
794 // prediction too small and then limit the young gen every time we get to the
795 // predicted target occupancy.
796 size_t last_unrestrained_young_length = update_young_list_max_and_target_length();
797
798 _old_gen_alloc_tracker.reset_after_young_gc(app_time_ms / 1000.0);
799 update_ihop_prediction(_old_gen_alloc_tracker.last_cycle_duration(),
800 _old_gen_alloc_tracker.last_cycle_old_bytes(),
801 last_unrestrained_young_length * HeapRegion::GrainBytes,
802 this_pause_was_young_only);
803
804 _ihop_control->send_trace_event(_g1h->gc_tracer_stw());
805 } else {
806 // Any garbage collection triggered as periodic collection resets the time-to-mixed
807 // measurement. Periodic collection typically means that the application is "inactive", i.e.
808 // the marking threads may have received an uncharacteristic amount of CPU time
809 // for completing the marking, i.e. they are faster than expected.
810 // This skews the predicted marking length towards smaller values, which might cause
811 // the mark to start too late.
812 _initial_mark_to_mixed.reset();
813 }
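// Sketch (not the real tracker class, which is not shown here): a minimal
// old-gen allocation tracker with just the interface this file relies on --
// reset_after_full_gc(), reset_after_young_gc(), last_cycle_duration() and
// last_cycle_old_bytes(). The record_allocation() hook is purely hypothetical;
// the actual class may record old-gen allocation differently.
#include <cstddef>

class OldGenAllocTrackerSketch {
  double _last_cycle_duration_s;  // mutator time covered by the finished cycle
  size_t _last_cycle_old_bytes;   // old-gen bytes allocated during that cycle
  size_t _bytes_since_last_gc;    // accumulates until the next young GC
public:
  OldGenAllocTrackerSketch()
    : _last_cycle_duration_s(0.0), _last_cycle_old_bytes(0), _bytes_since_last_gc(0) {}

  // Hypothetical recording hook for the old-gen allocation path.
  void record_allocation(size_t bytes) { _bytes_since_last_gc += bytes; }

  // After a Full GC nothing carries over; the next cycle starts from scratch.
  void reset_after_full_gc() {
    _last_cycle_duration_s = 0.0;
    _last_cycle_old_bytes = 0;
    _bytes_since_last_gc = 0;
  }

  // After a young GC, publish the finished cycle and begin a new one, mirroring
  // the call _old_gen_alloc_tracker.reset_after_young_gc(app_time_ms / 1000.0) above.
  void reset_after_young_gc(double cycle_duration_s) {
    _last_cycle_duration_s = cycle_duration_s;
    _last_cycle_old_bytes = _bytes_since_last_gc;
    _bytes_since_last_gc = 0;
  }

  double last_cycle_duration() const { return _last_cycle_duration_s; }
  size_t last_cycle_old_bytes() const { return _last_cycle_old_bytes; }
};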
814
815 // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
816 double scan_logged_cards_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
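// Worked example (flag values assumed for illustration): with a 200 ms pause
// goal, max_gc_time() returns 0.200 s, and G1RSetUpdatingPauseTimePercent = 10
// yields 0.200 * 1000 * 10 / 100.0 = 20 ms of the pause budgeted for scanning
// logged cards.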
817
818 if (scan_logged_cards_time_goal_ms < merge_hcc_time_ms) {
819 log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal). "
820 "Logged Cards Scan time goal: %1.2fms Scan HCC time: %1.2fms",
821 scan_logged_cards_time_goal_ms, merge_hcc_time_ms);
822
|