124 // Card Table Count Cache stats
125 double _min_clear_cc_time_ms; // min
126 double _max_clear_cc_time_ms; // max
127 double _cur_clear_cc_time_ms; // clearing time during current pause
128 double _cum_clear_cc_time_ms; // cumulative clearing time
129 jlong _num_cc_clears; // number of times the card count cache has been cleared
130 #endif
131
132 // Statistics for recent GC pauses. See below for how indexed.
133 TruncatedSeq* _recent_rs_scan_times_ms;
134
135 // These exclude marking times.
136 TruncatedSeq* _recent_pause_times_ms;
137 TruncatedSeq* _recent_gc_times_ms;
138
139 TruncatedSeq* _recent_CS_bytes_used_before;
140 TruncatedSeq* _recent_CS_bytes_surviving;
141
142 TruncatedSeq* _recent_rs_sizes;
143
144 TruncatedSeq* _concurrent_mark_init_times_ms;
145 TruncatedSeq* _concurrent_mark_remark_times_ms;
146 TruncatedSeq* _concurrent_mark_cleanup_times_ms;
147
148 Summary* _summary;
149
150 NumberSeq* _all_pause_times_ms;
151 NumberSeq* _all_full_gc_times_ms;
152 double _stop_world_start;
153 NumberSeq* _all_stop_world_times_ms;
154 NumberSeq* _all_yield_times_ms;
155
156 size_t _region_num_young;
157 size_t _region_num_tenured;
158 size_t _prev_region_num_young;
159 size_t _prev_region_num_tenured;
160
161 NumberSeq* _all_mod_union_times_ms;
162
163 int _aux_num;
164 NumberSeq* _all_aux_times_ms;
165 double* _cur_aux_start_times_ms;
166 double* _cur_aux_times_ms;
167 bool* _cur_aux_times_set;
168
// Per-GC-worker phase timings (ms) for the most recent pause; each array
// holds one slot per parallel GC worker thread.
169 double* _par_last_gc_worker_start_times_ms;
170 double* _par_last_ext_root_scan_times_ms;
171 double* _par_last_mark_stack_scan_times_ms;
172 double* _par_last_update_rs_times_ms;
173 double* _par_last_update_rs_processed_buffers;
174 double* _par_last_scan_rs_times_ms;
175 double* _par_last_obj_copy_times_ms;
176 double* _par_last_termination_times_ms;
177 double* _par_last_termination_attempts;
178 double* _par_last_gc_worker_end_times_ms;
179 double* _par_last_gc_worker_times_ms;
180
181 // indicates that we are in young GC mode
182 bool _in_young_gc_mode;
183
184 // indicates whether we are in full young or partially young GC mode
185 bool _full_young_gcs;
186
187 // if true, then it tries to dynamically adjust the length of the
188 // young list
189 bool _adaptive_young_list_length;
190 size_t _young_list_min_length;
191 size_t _young_list_target_length;
192 size_t _young_list_fixed_length;
193
194 // The max number of regions we can extend the eden by while the GC
195 // locker is active. This should be >= _young_list_target_length.
196 size_t _young_list_max_length;
197
198 size_t _young_cset_length;
199 bool _last_young_gc_full;
200
201 unsigned _full_young_pause_num;
202 unsigned _partial_young_pause_num;
203
510 size_t heap_bytes = _g1->capacity();
511 _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
512 }
513
514 void decrease_known_garbage_bytes(size_t known_garbage_bytes) {
515 guarantee( _known_garbage_bytes >= known_garbage_bytes, "invariant" );
516
517 _known_garbage_bytes -= known_garbage_bytes;
518 size_t heap_bytes = _g1->capacity();
519 _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
520 }
521
522 G1MMUTracker* mmu_tracker() {
523 return _mmu_tracker;
524 }
525
526 double max_pause_time_ms() {
527 return _mmu_tracker->max_gc_time() * 1000.0;
528 }
529
530 double predict_init_time_ms() {
531 return get_new_prediction(_concurrent_mark_init_times_ms);
532 }
533
534 double predict_remark_time_ms() {
535 return get_new_prediction(_concurrent_mark_remark_times_ms);
536 }
537
538 double predict_cleanup_time_ms() {
539 return get_new_prediction(_concurrent_mark_cleanup_times_ms);
540 }
541
542 // Returns an estimate of the survival rate of the region at yg-age
543 // "yg_age".
544 double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) {
545 TruncatedSeq* seq = surv_rate_group->get_seq(age);
546 if (seq->num() == 0)
547 gclog_or_tty->print("BARF! age is %d", age);
548 guarantee( seq->num() > 0, "invariant" );
549 double pred = get_new_prediction(seq);
550 if (pred > 1.0)
551 pred = 1.0;
552 return pred;
553 }
759 // pause, it is a suggestion that the pause should start a marking
760 // cycle by doing the initial-mark work. However, it is possible
761 // that the concurrent marking thread is still finishing up the
762 // previous marking cycle (e.g., clearing the next marking
763 // bitmap). If that is the case we cannot start a new cycle and
764 // we'll have to wait for the concurrent marking thread to finish
765 // what it is doing. In this case we will postpone the marking cycle
766 // initiation decision for the next pause. When we eventually decide
767 // to start a cycle, we will set _during_initial_mark_pause which
768 // will stay true until the end of the initial-mark pause and it's
769 // the condition that indicates that a pause is doing the
770 // initial-mark work.
771 volatile bool _during_initial_mark_pause;
772
773 bool _should_revert_to_full_young_gcs;
774 bool _last_full_young_gc;
775
776 // This set of variables tracks the collector efficiency, in order to
777 // determine whether we should initiate a new marking.
778 double _cur_mark_stop_world_time_ms;
779 double _mark_init_start_sec;
780 double _mark_remark_start_sec;
781 double _mark_cleanup_start_sec;
782 double _mark_closure_time_ms;
783
784 void calculate_young_list_min_length();
785 void calculate_young_list_target_length();
786 void calculate_young_list_target_length(size_t rs_lengths);
787
788 public:
789
790 G1CollectorPolicy();
791
// Downcast accessor: returns this policy object as a G1CollectorPolicy*.
792 virtual G1CollectorPolicy* as_g1_policy() { return this; }
793
794 virtual CollectorPolicy::Name kind() {
795 return CollectorPolicy::G1CollectorPolicyKind;
796 }
797
798 void check_prediction_validity();
799
832 virtual HeapWord* satisfy_failed_allocation(size_t size,
833 bool is_tlab);
834
835 BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }
836
837 GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; }
838
839 // The number of collection pauses so far.
840 long n_pauses() const { return _n_pauses; }
841
842 // Update the heuristic info to record a collection pause of the given
843 // start time, where the given number of bytes were used at the start.
844 // This may involve changing the desired size of a collection set.
// NOTE(review): the comment above appears to describe
// record_collection_pause_start below.
845
846 virtual void record_stop_world_start();
847
848 virtual void record_collection_pause_start(double start_time_sec,
849 size_t start_used);
850
851 // Must currently be called while the world is stopped.
852 virtual void record_concurrent_mark_init_start();
853 virtual void record_concurrent_mark_init_end();
854 void record_concurrent_mark_init_end_pre(double
855 mark_init_elapsed_time_ms);
856
857 void record_mark_closure_time(double mark_closure_time_ms);
858
859 virtual void record_concurrent_mark_remark_start();
860 virtual void record_concurrent_mark_remark_end();
861
862 virtual void record_concurrent_mark_cleanup_start();
863 virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
864 size_t max_live_bytes);
865 virtual void record_concurrent_mark_cleanup_completed();
866
867 virtual void record_concurrent_pause();
868 virtual void record_concurrent_pause_end();
869
870 virtual void record_collection_pause_end();
871 void print_heap_transition();
872
873 // Record the fact that a full collection occurred.
874 virtual void record_full_collection_start();
1100
1101 bool is_young_list_full() {
1102 size_t young_list_length = _g1->young_list()->length();
1103 size_t young_list_target_length = _young_list_target_length;
1104 if (G1FixedEdenSize) {
1105 young_list_target_length -= _max_survivor_regions;
1106 }
1107 return young_list_length >= young_list_target_length;
1108 }
1109
1110 bool can_expand_young_list() {
1111 size_t young_list_length = _g1->young_list()->length();
1112 size_t young_list_max_length = _young_list_max_length;
1113 if (G1FixedEdenSize) {
1114 young_list_max_length -= _max_survivor_regions;
1115 }
1116 return young_list_length < young_list_max_length;
1117 }
1118
// NOTE(review): presumably updates the region-type counters
// (_region_num_young / _region_num_tenured) -- confirm against the .cpp.
1119 void update_region_num(bool young);
1120
1121 bool in_young_gc_mode() {
1122 return _in_young_gc_mode;
1123 }
1124 void set_in_young_gc_mode(bool in_young_gc_mode) {
1125 _in_young_gc_mode = in_young_gc_mode;
1126 }
1127
1128 bool full_young_gcs() {
1129 return _full_young_gcs;
1130 }
1131 void set_full_young_gcs(bool full_young_gcs) {
1132 _full_young_gcs = full_young_gcs;
1133 }
1134
1135 bool adaptive_young_list_length() {
1136 return _adaptive_young_list_length;
1137 }
1138 void set_adaptive_young_list_length(bool adaptive_young_list_length) {
1139 _adaptive_young_list_length = adaptive_young_list_length;
1140 }
1141
1142 inline double get_gc_eff_factor() {
1143 double ratio = _known_garbage_ratio;
1144
1145 double square = ratio * ratio;
1146 // square = square * square;
|
124 // Card Table Count Cache stats
125 double _min_clear_cc_time_ms; // min
126 double _max_clear_cc_time_ms; // max
127 double _cur_clear_cc_time_ms; // clearing time during current pause
128 double _cum_clear_cc_time_ms; // cumulative clearing time
129 jlong _num_cc_clears; // number of times the card count cache has been cleared
130 #endif
131
132 // Statistics for recent GC pauses. See below for how indexed.
133 TruncatedSeq* _recent_rs_scan_times_ms;
134
135 // These exclude marking times.
136 TruncatedSeq* _recent_pause_times_ms;
137 TruncatedSeq* _recent_gc_times_ms;
138
139 TruncatedSeq* _recent_CS_bytes_used_before;
140 TruncatedSeq* _recent_CS_bytes_surviving;
141
142 TruncatedSeq* _recent_rs_sizes;
143
144 TruncatedSeq* _concurrent_mark_remark_times_ms;
145 TruncatedSeq* _concurrent_mark_cleanup_times_ms;
146
147 Summary* _summary;
148
149 NumberSeq* _all_pause_times_ms;
150 NumberSeq* _all_full_gc_times_ms;
151 double _stop_world_start;
152 NumberSeq* _all_stop_world_times_ms;
153 NumberSeq* _all_yield_times_ms;
154
155 size_t _region_num_young;
156 size_t _region_num_tenured;
157 size_t _prev_region_num_young;
158 size_t _prev_region_num_tenured;
159
160 NumberSeq* _all_mod_union_times_ms;
161
162 int _aux_num;
163 NumberSeq* _all_aux_times_ms;
164 double* _cur_aux_start_times_ms;
165 double* _cur_aux_times_ms;
166 bool* _cur_aux_times_set;
167
// Per-GC-worker phase timings (ms) for the most recent pause; each array
// holds one slot per parallel GC worker thread.
168 double* _par_last_gc_worker_start_times_ms;
169 double* _par_last_ext_root_scan_times_ms;
170 double* _par_last_mark_stack_scan_times_ms;
171 double* _par_last_update_rs_times_ms;
172 double* _par_last_update_rs_processed_buffers;
173 double* _par_last_scan_rs_times_ms;
174 double* _par_last_obj_copy_times_ms;
175 double* _par_last_termination_times_ms;
176 double* _par_last_termination_attempts;
177 double* _par_last_gc_worker_end_times_ms;
178 double* _par_last_gc_worker_times_ms;
179
180 // indicates whether we are in full young or partially young GC mode
181 bool _full_young_gcs;
182
183 // if true, then it tries to dynamically adjust the length of the
184 // young list
185 bool _adaptive_young_list_length;
186 size_t _young_list_min_length;
187 size_t _young_list_target_length;
188 size_t _young_list_fixed_length;
189
190 // The max number of regions we can extend the eden by while the GC
191 // locker is active. This should be >= _young_list_target_length.
192 size_t _young_list_max_length;
193
194 size_t _young_cset_length;
195 bool _last_young_gc_full;
196
197 unsigned _full_young_pause_num;
198 unsigned _partial_young_pause_num;
199
506 size_t heap_bytes = _g1->capacity();
507 _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
508 }
509
510 void decrease_known_garbage_bytes(size_t known_garbage_bytes) {
511 guarantee( _known_garbage_bytes >= known_garbage_bytes, "invariant" );
512
513 _known_garbage_bytes -= known_garbage_bytes;
514 size_t heap_bytes = _g1->capacity();
515 _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
516 }
517
518 G1MMUTracker* mmu_tracker() {
519 return _mmu_tracker;
520 }
521
522 double max_pause_time_ms() {
523 return _mmu_tracker->max_gc_time() * 1000.0;
524 }
525
526 double predict_remark_time_ms() {
527 return get_new_prediction(_concurrent_mark_remark_times_ms);
528 }
529
530 double predict_cleanup_time_ms() {
531 return get_new_prediction(_concurrent_mark_cleanup_times_ms);
532 }
533
534 // Returns an estimate of the survival rate of the region at yg-age
535 // "yg_age".
536 double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) {
537 TruncatedSeq* seq = surv_rate_group->get_seq(age);
538 if (seq->num() == 0)
539 gclog_or_tty->print("BARF! age is %d", age);
540 guarantee( seq->num() > 0, "invariant" );
541 double pred = get_new_prediction(seq);
542 if (pred > 1.0)
543 pred = 1.0;
544 return pred;
545 }
751 // pause, it is a suggestion that the pause should start a marking
752 // cycle by doing the initial-mark work. However, it is possible
753 // that the concurrent marking thread is still finishing up the
754 // previous marking cycle (e.g., clearing the next marking
755 // bitmap). If that is the case we cannot start a new cycle and
756 // we'll have to wait for the concurrent marking thread to finish
757 // what it is doing. In this case we will postpone the marking cycle
758 // initiation decision for the next pause. When we eventually decide
759 // to start a cycle, we will set _during_initial_mark_pause which
760 // will stay true until the end of the initial-mark pause and it's
761 // the condition that indicates that a pause is doing the
762 // initial-mark work.
763 volatile bool _during_initial_mark_pause;
764
765 bool _should_revert_to_full_young_gcs;
766 bool _last_full_young_gc;
767
768 // This set of variables tracks the collector efficiency, in order to
769 // determine whether we should initiate a new marking.
770 double _cur_mark_stop_world_time_ms;
771 double _mark_remark_start_sec;
772 double _mark_cleanup_start_sec;
773 double _mark_closure_time_ms;
774
775 void calculate_young_list_min_length();
776 void calculate_young_list_target_length();
777 void calculate_young_list_target_length(size_t rs_lengths);
778
779 public:
780
781 G1CollectorPolicy();
782
// Downcast accessor: returns this policy object as a G1CollectorPolicy*.
783 virtual G1CollectorPolicy* as_g1_policy() { return this; }
784
785 virtual CollectorPolicy::Name kind() {
786 return CollectorPolicy::G1CollectorPolicyKind;
787 }
788
789 void check_prediction_validity();
790
823 virtual HeapWord* satisfy_failed_allocation(size_t size,
824 bool is_tlab);
825
826 BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }
827
828 GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; }
829
830 // The number of collection pauses so far.
831 long n_pauses() const { return _n_pauses; }
832
833 // Update the heuristic info to record a collection pause of the given
834 // start time, where the given number of bytes were used at the start.
835 // This may involve changing the desired size of a collection set.
// NOTE(review): the comment above appears to describe
// record_collection_pause_start below.
836
837 virtual void record_stop_world_start();
838
839 virtual void record_collection_pause_start(double start_time_sec,
840 size_t start_used);
841
842 // Must currently be called while the world is stopped.
843 void record_concurrent_mark_init_end(double
844 mark_init_elapsed_time_ms);
845
846 void record_mark_closure_time(double mark_closure_time_ms);
847
848 virtual void record_concurrent_mark_remark_start();
849 virtual void record_concurrent_mark_remark_end();
850
851 virtual void record_concurrent_mark_cleanup_start();
852 virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
853 size_t max_live_bytes);
854 virtual void record_concurrent_mark_cleanup_completed();
855
856 virtual void record_concurrent_pause();
857 virtual void record_concurrent_pause_end();
858
859 virtual void record_collection_pause_end();
860 void print_heap_transition();
861
862 // Record the fact that a full collection occurred.
863 virtual void record_full_collection_start();
1089
1090 bool is_young_list_full() {
1091 size_t young_list_length = _g1->young_list()->length();
1092 size_t young_list_target_length = _young_list_target_length;
1093 if (G1FixedEdenSize) {
1094 young_list_target_length -= _max_survivor_regions;
1095 }
1096 return young_list_length >= young_list_target_length;
1097 }
1098
1099 bool can_expand_young_list() {
1100 size_t young_list_length = _g1->young_list()->length();
1101 size_t young_list_max_length = _young_list_max_length;
1102 if (G1FixedEdenSize) {
1103 young_list_max_length -= _max_survivor_regions;
1104 }
1105 return young_list_length < young_list_max_length;
1106 }
1107
// NOTE(review): presumably updates the region-type counters
// (_region_num_young / _region_num_tenured) -- confirm against the .cpp.
1108 void update_region_num(bool young);
1109
1110 bool full_young_gcs() {
1111 return _full_young_gcs;
1112 }
1113 void set_full_young_gcs(bool full_young_gcs) {
1114 _full_young_gcs = full_young_gcs;
1115 }
1116
1117 bool adaptive_young_list_length() {
1118 return _adaptive_young_list_length;
1119 }
1120 void set_adaptive_young_list_length(bool adaptive_young_list_length) {
1121 _adaptive_young_list_length = adaptive_young_list_length;
1122 }
1123
1124 inline double get_gc_eff_factor() {
1125 double ratio = _known_garbage_ratio;
1126
1127 double square = ratio * ratio;
1128 // square = square * square;
|