35 #include "gc/g1/g1YoungGenSizer.hpp"
36 #include "gc/g1/heapRegion.inline.hpp"
37 #include "gc/g1/heapRegionRemSet.hpp"
38 #include "gc/shared/gcPolicyCounters.hpp"
39 #include "runtime/arguments.hpp"
40 #include "runtime/java.hpp"
41 #include "runtime/mutexLocker.hpp"
42 #include "utilities/debug.hpp"
43 #include "utilities/pair.hpp"
44
45 G1Policy::G1Policy() :
46 _predictor(G1ConfidencePercent / 100.0),
47 _analytics(new G1Analytics(&_predictor)),
48 _pause_time_target_ms((double) MaxGCPauseMillis),
49 _rs_lengths_prediction(0),
50 _max_survivor_regions(0),
51 _survivors_age_table(true),
52
53 _bytes_allocated_in_old_since_last_gc(0),
54 _ihop_control(NULL),
55 _initial_mark_to_mixed() {
56
57 // SurvRateGroups below must be initialized after the predictor because they
58 // indirectly use it through this object passed to their constructor.
59 _short_lived_surv_rate_group =
60 new SurvRateGroup(&_predictor, "Short Lived", G1YoungSurvRateNumRegionsSummary);
61 _survivor_surv_rate_group =
62 new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary);
63
64 _phase_times = new G1GCPhaseTimes(ParallelGCThreads);
65
66 double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
67 double time_slice = (double) GCPauseIntervalMillis / 1000.0;
68 _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
69
70 _tenuring_threshold = MaxTenuringThreshold;
71
72
73 guarantee(G1ReservePercent <= 50, "Range checking should not allow values over 50.");
74 _reserve_factor = (double) G1ReservePercent / 100.0;
75 // This will be set when the heap is expanded
76 // for the first time during initialization.
77 _reserve_regions = 0;
78
79 _ihop_control = create_ihop_control();
80 }
81
// Releases the IHOP control created in the constructor via create_ihop_control().
// NOTE(review): the other constructor allocations (_analytics, _phase_times,
// _mmu_tracker, the SurvRateGroups) are not deleted here -- presumably the
// policy lives until VM shutdown; confirm before treating this as a leak.
G1Policy::~G1Policy() {
  delete _ihop_control;
}
85
86 G1CollectorState* G1Policy::collector_state() const { return _g1->collector_state(); }
87
88 void G1Policy::init() {
89 // Set aside an initial future to_space.
90 _g1 = G1CollectedHeap::heap();
91 _collection_set = _g1->collection_set();
92
93 assert(Heap_lock->owned_by_self(), "Locking discipline.");
94
95 _g1->collector_policy()->initialize_gc_policy_counters();
96
97 if (adaptive_young_list_length()) {
98 _young_list_fixed_length = 0;
99 } else {
100 _young_list_fixed_length = _young_gen_sizer.min_desired_young_length();
101 }
102 _young_gen_sizer.adjust_max_new_size(_g1->max_regions());
103
104 _free_regions_at_end_of_collection = _g1->num_free_regions();
105
106 update_young_list_max_and_target_length();
107 // We may immediately start allocating regions and placing them on the
108 // collection set list. Initialize the per-collection set info
109 _collection_set->start_incremental_building();
110 }
111
// Records the start of the current GC in the phase times tracker.
void G1Policy::note_gc_start() {
  phase_times()->note_gc_start();
}
115
116 bool G1Policy::predict_will_fit(uint young_length,
953 double perc = (double) GCLockerEdenExpansionPercent / 100.0;
954 double expansion_region_num_d = perc * (double) _young_list_target_length;
955 // We use ceiling so that if expansion_region_num_d is > 0.0 (but
956 // less than 1.0) we'll get 1.
957 expansion_region_num = (uint) ceil(expansion_region_num_d);
958 } else {
959 assert(expansion_region_num == 0, "sanity");
960 }
961 _young_list_max_length = _young_list_target_length + expansion_region_num;
962 assert(_young_list_target_length <= _young_list_max_length, "post-condition");
963 }
964
965 // Calculates survivor space parameters.
966 void G1Policy::update_survivors_policy() {
967 double max_survivor_regions_d =
968 (double) _young_list_target_length / (double) SurvivorRatio;
969 // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
970 // smaller than 1.0) we'll get 1.
971 _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
972
973 GCPolicyCounters* counters = _g1->collector_policy()->counters();
974 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
975 HeapRegion::GrainWords * _max_survivor_regions, counters);
976 }
977
978 bool G1Policy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) {
979 // We actually check whether we are marking here and not if we are in a
980 // reclamation phase. This means that we will schedule a concurrent mark
981 // even while we are still in the process of reclaiming memory.
982 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
983 if (!during_cycle) {
984 log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause));
985 collector_state()->set_initiate_conc_mark_if_possible(true);
986 return true;
987 } else {
988 log_debug(gc, ergo)("Do not request concurrent cycle initiation (concurrent cycle already in progress). GC cause: %s", GCCause::to_string(gc_cause));
989 return false;
990 }
991 }
992
993 void G1Policy::initiate_conc_mark() {
994 collector_state()->set_during_initial_mark_pause(true);
995 collector_state()->set_initiate_conc_mark_if_possible(false);
|
35 #include "gc/g1/g1YoungGenSizer.hpp"
36 #include "gc/g1/heapRegion.inline.hpp"
37 #include "gc/g1/heapRegionRemSet.hpp"
38 #include "gc/shared/gcPolicyCounters.hpp"
39 #include "runtime/arguments.hpp"
40 #include "runtime/java.hpp"
41 #include "runtime/mutexLocker.hpp"
42 #include "utilities/debug.hpp"
43 #include "utilities/pair.hpp"
44
45 G1Policy::G1Policy() :
46 _predictor(G1ConfidencePercent / 100.0),
47 _analytics(new G1Analytics(&_predictor)),
48 _pause_time_target_ms((double) MaxGCPauseMillis),
49 _rs_lengths_prediction(0),
50 _max_survivor_regions(0),
51 _survivors_age_table(true),
52
53 _bytes_allocated_in_old_since_last_gc(0),
54 _ihop_control(NULL),
55 _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 3)),
56 _initial_mark_to_mixed() {
57
58 // SurvRateGroups below must be initialized after the predictor because they
59 // indirectly use it through this object passed to their constructor.
60 _short_lived_surv_rate_group =
61 new SurvRateGroup(&_predictor, "Short Lived", G1YoungSurvRateNumRegionsSummary);
62 _survivor_surv_rate_group =
63 new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary);
64
65 _phase_times = new G1GCPhaseTimes(ParallelGCThreads);
66
67 double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
68 double time_slice = (double) GCPauseIntervalMillis / 1000.0;
69 _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
70
71 _tenuring_threshold = MaxTenuringThreshold;
72
73
74 guarantee(G1ReservePercent <= 50, "Range checking should not allow values over 50.");
75 _reserve_factor = (double) G1ReservePercent / 100.0;
76 // This will be set when the heap is expanded
77 // for the first time during initialization.
78 _reserve_regions = 0;
79
80 _ihop_control = create_ihop_control();
81 }
82
// Frees the IHOP control allocated by create_ihop_control().
// NOTE(review): other constructor allocations (_analytics, _phase_times,
// _mmu_tracker, _policy_counters, the SurvRateGroups) are not freed here --
// presumably the policy lives for the whole VM lifetime; confirm before
// treating this as a leak.
G1Policy::~G1Policy() {
  delete _ihop_control;
}
86
87 G1CollectorState* G1Policy::collector_state() const { return _g1->collector_state(); }
88
89 void G1Policy::init() {
90 // Set aside an initial future to_space.
91 _g1 = G1CollectedHeap::heap();
92 _collection_set = _g1->collection_set();
93
94 assert(Heap_lock->owned_by_self(), "Locking discipline.");
95
96 if (adaptive_young_list_length()) {
97 _young_list_fixed_length = 0;
98 } else {
99 _young_list_fixed_length = _young_gen_sizer.min_desired_young_length();
100 }
101 _young_gen_sizer.adjust_max_new_size(_g1->max_regions());
102
103 _free_regions_at_end_of_collection = _g1->num_free_regions();
104
105 update_young_list_max_and_target_length();
106 // We may immediately start allocating regions and placing them on the
107 // collection set list. Initialize the per-collection set info
108 _collection_set->start_incremental_building();
109 }
110
// Marks the beginning of a GC pause in the phase-times bookkeeping.
void G1Policy::note_gc_start() {
  phase_times()->note_gc_start();
}
114
115 bool G1Policy::predict_will_fit(uint young_length,
952 double perc = (double) GCLockerEdenExpansionPercent / 100.0;
953 double expansion_region_num_d = perc * (double) _young_list_target_length;
954 // We use ceiling so that if expansion_region_num_d is > 0.0 (but
955 // less than 1.0) we'll get 1.
956 expansion_region_num = (uint) ceil(expansion_region_num_d);
957 } else {
958 assert(expansion_region_num == 0, "sanity");
959 }
960 _young_list_max_length = _young_list_target_length + expansion_region_num;
961 assert(_young_list_target_length <= _young_list_max_length, "post-condition");
962 }
963
964 // Calculates survivor space parameters.
965 void G1Policy::update_survivors_policy() {
966 double max_survivor_regions_d =
967 (double) _young_list_target_length / (double) SurvivorRatio;
968 // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
969 // smaller than 1.0) we'll get 1.
970 _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
971
972 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
973 HeapRegion::GrainWords * _max_survivor_regions, _policy_counters);
974 }
975
976 bool G1Policy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) {
977 // We actually check whether we are marking here and not if we are in a
978 // reclamation phase. This means that we will schedule a concurrent mark
979 // even while we are still in the process of reclaiming memory.
980 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
981 if (!during_cycle) {
982 log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause));
983 collector_state()->set_initiate_conc_mark_if_possible(true);
984 return true;
985 } else {
986 log_debug(gc, ergo)("Do not request concurrent cycle initiation (concurrent cycle already in progress). GC cause: %s", GCCause::to_string(gc_cause));
987 return false;
988 }
989 }
990
991 void G1Policy::initiate_conc_mark() {
992 collector_state()->set_during_initial_mark_pause(true);
993 collector_state()->set_initiate_conc_mark_if_possible(false);
|