44 #include "gc/g1/heapRegionRemSet.hpp"
45 #include "gc/shared/concurrentGCBreakpoints.hpp"
46 #include "gc/shared/gcPolicyCounters.hpp"
47 #include "logging/log.hpp"
48 #include "runtime/arguments.hpp"
49 #include "runtime/globals.hpp"
50 #include "runtime/java.hpp"
51 #include "runtime/mutexLocker.hpp"
52 #include "utilities/debug.hpp"
53 #include "utilities/growableArray.hpp"
54 #include "utilities/pair.hpp"
55
56 G1Policy::G1Policy(STWGCTimer* gc_timer) :
57 _predictor(G1ConfidencePercent / 100.0),
58 _analytics(new G1Analytics(&_predictor)),
59 _remset_tracker(),
60 _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
61 _ihop_control(create_ihop_control(&_predictor)),
62 _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
63 _full_collection_start_sec(0.0),
64 _time_of_last_gc_ns(os::javaTimeNanos()),
65 _young_list_desired_length(0),
66 _young_list_target_length(0),
67 _young_list_max_length(0),
68 _eden_surv_rate_group(new G1SurvRateGroup()),
69 _survivor_surv_rate_group(new G1SurvRateGroup()),
70 _reserve_factor((double) G1ReservePercent / 100.0),
71 _reserve_regions(0),
72 _young_gen_sizer(G1YoungGenSizer::create_gen_sizer()),
73 _free_regions_at_end_of_collection(0),
74 _rs_length(0),
75 _rs_length_prediction(0),
76 _pending_cards_at_gc_start(0),
77 _old_gen_alloc_tracker(),
78 _initial_mark_to_mixed(),
79 _collection_set(NULL),
80 _g1h(NULL),
81 _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
82 _mark_remark_start_sec(0),
83 _mark_cleanup_start_sec(0),
84 _tenuring_threshold(MaxTenuringThreshold),
733 if (logged_dirty_cards > scan_heap_roots_cards) {
734 return all_cards_processing_time + average_time_ms(G1GCPhaseTimes::MergeLB);
735 }
736 return (all_cards_processing_time * logged_dirty_cards / scan_heap_roots_cards) + average_time_ms(G1GCPhaseTimes::MergeLB);
737 }
738
739 // Any duration below this threshold is considered to be zero.
740 #define MIN_TIMER_GRANULARITY 0.0000001
741
742 void G1Policy::record_collection_pause_end(double pause_time_ms) {
743 G1GCPhaseTimes* p = phase_times();
744
745 double end_time_sec = os::elapsedTime();
746
747 bool this_pause_included_initial_mark = false;
748 bool this_pause_was_young_only = collector_state()->in_young_only_phase();
749
750 bool update_stats = !_g1h->evacuation_failed();
751
752 record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);
753
754 _time_of_last_gc_ns = os::javaTimeNanos();
755
756 this_pause_included_initial_mark = collector_state()->in_initial_mark_gc();
757 if (this_pause_included_initial_mark) {
758 record_concurrent_mark_init_end(0.0);
759 } else {
760 maybe_start_marking();
761 }
762
763 double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _analytics->prev_collection_pause_end_ms());
764 if (app_time_ms < MIN_TIMER_GRANULARITY) {
765 // This usually happens due to the timer not having the required
766 // granularity. Some Linuxes are the usual culprits.
767 // We'll just set it to something (arbitrarily) small.
768 app_time_ms = 1.0;
769 }
770
771 if (update_stats) {
772 // We maintain the invariant that all objects allocated by mutator
773 // threads will be allocated out of eden regions. So, we can use
774 // the eden region number allocated since the previous GC to
|
44 #include "gc/g1/heapRegionRemSet.hpp"
45 #include "gc/shared/concurrentGCBreakpoints.hpp"
46 #include "gc/shared/gcPolicyCounters.hpp"
47 #include "logging/log.hpp"
48 #include "runtime/arguments.hpp"
49 #include "runtime/globals.hpp"
50 #include "runtime/java.hpp"
51 #include "runtime/mutexLocker.hpp"
52 #include "utilities/debug.hpp"
53 #include "utilities/growableArray.hpp"
54 #include "utilities/pair.hpp"
55
56 G1Policy::G1Policy(STWGCTimer* gc_timer) :
57 _predictor(G1ConfidencePercent / 100.0),
58 _analytics(new G1Analytics(&_predictor)),
59 _remset_tracker(),
60 _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
61 _ihop_control(create_ihop_control(&_predictor)),
62 _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
63 _full_collection_start_sec(0.0),
64 _young_list_desired_length(0),
65 _young_list_target_length(0),
66 _young_list_max_length(0),
67 _eden_surv_rate_group(new G1SurvRateGroup()),
68 _survivor_surv_rate_group(new G1SurvRateGroup()),
69 _reserve_factor((double) G1ReservePercent / 100.0),
70 _reserve_regions(0),
71 _young_gen_sizer(G1YoungGenSizer::create_gen_sizer()),
72 _free_regions_at_end_of_collection(0),
73 _rs_length(0),
74 _rs_length_prediction(0),
75 _pending_cards_at_gc_start(0),
76 _old_gen_alloc_tracker(),
77 _initial_mark_to_mixed(),
78 _collection_set(NULL),
79 _g1h(NULL),
80 _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
81 _mark_remark_start_sec(0),
82 _mark_cleanup_start_sec(0),
83 _tenuring_threshold(MaxTenuringThreshold),
732 if (logged_dirty_cards > scan_heap_roots_cards) {
733 return all_cards_processing_time + average_time_ms(G1GCPhaseTimes::MergeLB);
734 }
735 return (all_cards_processing_time * logged_dirty_cards / scan_heap_roots_cards) + average_time_ms(G1GCPhaseTimes::MergeLB);
736 }
737
738 // Any duration below this threshold is considered to be zero.
739 #define MIN_TIMER_GRANULARITY 0.0000001
740
741 void G1Policy::record_collection_pause_end(double pause_time_ms) {
742 G1GCPhaseTimes* p = phase_times();
743
744 double end_time_sec = os::elapsedTime();
745
746 bool this_pause_included_initial_mark = false;
747 bool this_pause_was_young_only = collector_state()->in_young_only_phase();
748
749 bool update_stats = !_g1h->evacuation_failed();
750
751 record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);
752
753 this_pause_included_initial_mark = collector_state()->in_initial_mark_gc();
754 if (this_pause_included_initial_mark) {
755 record_concurrent_mark_init_end(0.0);
756 } else {
757 maybe_start_marking();
758 }
759
760 double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _analytics->prev_collection_pause_end_ms());
761 if (app_time_ms < MIN_TIMER_GRANULARITY) {
762 // This usually happens due to the timer not having the required
763 // granularity. Some Linuxes are the usual culprits.
764 // We'll just set it to something (arbitrarily) small.
765 app_time_ms = 1.0;
766 }
767
768 if (update_stats) {
769 // We maintain the invariant that all objects allocated by mutator
770 // threads will be allocated out of eden regions. So, we can use
771 // the eden region number allocated since the previous GC to
|