1 /*
2 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
  // Recompute min/max young lengths for the given heap size; results are
  // written through the two out-parameters.
  void recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length);

public:
  G1YoungGenSizer();
  // Calculate the maximum length of the young gen given the number of regions
  // depending on the sizing algorithm.
  uint max_young_length(uint number_of_heap_regions);

  // Re-derive the desired young lengths after the heap has been resized.
  void heap_size_changed(uint new_number_of_heap_regions);
  // Minimum desired young length, in regions.
  uint min_desired_young_length() {
    return _min_desired_young_length;
  }
  // Maximum desired young length, in regions.
  uint max_desired_young_length() {
    return _max_desired_young_length;
  }
  // Whether the young gen size is adapted by the policy (_adaptive_size)
  // rather than fixed by user settings.
  bool adaptive_young_list_length() {
    return _adaptive_size;
  }
};
161
class G1CollectorPolicy: public CollectorPolicy {
private:
  // either equal to the number of parallel threads, if ParallelGCThreads
  // has been set, or 1 otherwise
  int _parallel_gc_threads;

  // The number of GC threads currently active.
  uintx _no_of_gc_threads;

  enum SomePrivateConstants {
    // Number of recent pauses kept for pause-time heuristics.
    NumPrevPausesForHeuristics = 10
  };

  // Tracks pause times against the MMU (minimum mutator utilization) goal.
  G1MMUTracker* _mmu_tracker;

  void initialize_alignments();
  void initialize_flags();

  // Selects old-region candidates for mixed collections.
  CollectionSetChooser* _collectionSetChooser;

  double _full_collection_start_sec;
  uint   _cur_collection_pause_used_regions_at_start;

  // These exclude marking times.
  TruncatedSeq* _recent_gc_times_ms;

  TruncatedSeq* _concurrent_mark_remark_times_ms;
  TruncatedSeq* _concurrent_mark_cleanup_times_ms;

  TraceYoungGenTimeData _trace_young_gen_time_data;
  TraceOldGenTimeData   _trace_old_gen_time_data;

  double _stop_world_start;

  // indicates whether we are in young or mixed GC mode
  bool _gcs_are_young;

  uint _young_list_target_length;
  uint _young_list_fixed_length;

  // The max number of regions we can extend the eden by while the GC
  // locker is active. This should be >= _young_list_target_length;
  uint _young_list_max_length;

  bool _last_gc_was_young;

  // Marking-phase flags; _in_marking_window_im additionally covers the
  // initial-mark pause itself.
  bool _during_marking;
  bool _in_marking_window;
  bool _in_marking_window_im;

  SurvRateGroup* _short_lived_surv_rate_group;
  SurvRateGroup* _survivor_surv_rate_group;
  // add here any more surv rate groups

  // Target GC overhead as a percentage of total time.
  double _gc_overhead_perc;

  // Fraction/number of regions kept back as a to-space reserve.
  double _reserve_factor;
  uint   _reserve_regions;

  bool during_marking() {
    return _during_marking;
  }

  enum PredictionConstants {
    // Sample window length for the prediction sequences below.
    TruncatedSeqLength = 10
  };

  TruncatedSeq* _alloc_rate_ms_seq;
  double        _prev_collection_pause_end_ms;

  // Sample sequences feeding G1's pause-time prediction model.
  TruncatedSeq* _rs_length_diff_seq;
  TruncatedSeq* _cost_per_card_ms_seq;
  TruncatedSeq* _young_cards_per_entry_ratio_seq;
  TruncatedSeq* _mixed_cards_per_entry_ratio_seq;
  TruncatedSeq* _cost_per_entry_ms_seq;
  TruncatedSeq* _mixed_cost_per_entry_ms_seq;
  TruncatedSeq* _cost_per_byte_ms_seq;
  TruncatedSeq* _constant_other_time_ms_seq;
  TruncatedSeq* _young_other_cost_per_region_ms_seq;
  TruncatedSeq* _non_young_other_cost_per_region_ms_seq;

  TruncatedSeq* _pending_cards_seq;
  TruncatedSeq* _rs_lengths_seq;
346
347 double predict_mixed_cards_per_entry_ratio() {
348 if (_mixed_cards_per_entry_ratio_seq->num() < 2) {
349 return predict_young_cards_per_entry_ratio();
350 } else {
351 return get_new_prediction(_mixed_cards_per_entry_ratio_seq);
352 }
353 }
354
  // Predicted number of cards to scan for a young-GC remembered set of
  // rs_length entries (entries scaled by the predicted ratio).
  size_t predict_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_young_cards_per_entry_ratio());
  }

  // Same as above but using the mixed-GC cards-per-entry ratio.
  size_t predict_non_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_mixed_cards_per_entry_ratio());
  }
364
365 double predict_rs_scan_time_ms(size_t card_num) {
366 if (gcs_are_young()) {
367 return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
368 } else {
369 return predict_mixed_rs_scan_time_ms(card_num);
370 }
371 }
372
373 double predict_mixed_rs_scan_time_ms(size_t card_num) {
374 if (_mixed_cost_per_entry_ms_seq->num() < 3) {
375 return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
376 } else {
377 return (double) (card_num *
378 get_new_prediction(_mixed_cost_per_entry_ms_seq));
379 }
380 }
381
382 double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
383 if (_cost_per_byte_ms_during_cm_seq->num() < 3) {
384 return (1.1 * (double) bytes_to_copy) *
385 get_new_prediction(_cost_per_byte_ms_seq);
386 } else {
387 return (double) bytes_to_copy *
388 get_new_prediction(_cost_per_byte_ms_during_cm_seq);
389 }
390 }
391
392 double predict_object_copy_time_ms(size_t bytes_to_copy) {
393 if (_in_marking_window && !_in_marking_window_im) {
394 return predict_object_copy_time_ms_during_cm(bytes_to_copy);
395 } else {
396 return (double) bytes_to_copy *
397 get_new_prediction(_cost_per_byte_ms_seq);
398 }
399 }
400
  // Predicted fixed per-pause overhead (ms).
  double predict_constant_other_time_ms() {
    return get_new_prediction(_constant_other_time_ms_seq);
  }

  // Predicted per-region "other" overhead (ms) for young_num young regions.
  double predict_young_other_time_ms(size_t young_num) {
    return (double) young_num *
           get_new_prediction(_young_other_cost_per_region_ms_seq);
  }

  // Predicted per-region "other" overhead (ms) for non_young_num old regions.
  double predict_non_young_other_time_ms(size_t non_young_num) {
    return (double) non_young_num *
           get_new_prediction(_non_young_other_cost_per_region_ms_seq);
  }
414
  // Predicted fixed pause cost (ms) given the pending cards (and,
  // in the second overload, the cards to be scanned).
  double predict_base_elapsed_time_ms(size_t pending_cards);
  double predict_base_elapsed_time_ms(size_t pending_cards,
                                      size_t scanned_cards);
  // Predicted number of bytes that will survive evacuation of hr.
  size_t predict_bytes_to_copy(HeapRegion* hr);
  double predict_region_elapsed_time_ms(HeapRegion* hr, bool for_young_gc);

  void set_recorded_rs_lengths(size_t rs_lengths);

  // Collection-set length accessors, in regions.
  uint cset_region_length() { return young_cset_region_length() +
                                     old_cset_region_length(); }
  uint young_cset_region_length() { return eden_cset_region_length() +
                                           survivor_cset_region_length(); }

  double predict_survivor_regions_evac_time();
429
430 void cset_regions_freed() {
431 bool propagate = _last_gc_was_young && !_in_marking_window;
432 _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
433 _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
434 // also call it on any more surv rate groups
435 }
436
  G1MMUTracker* mmu_tracker() {
    return _mmu_tracker;
  }

  // Pause time goal in ms (max_gc_time() is presumably in seconds,
  // hence the * 1000.0 -- confirm against G1MMUTracker).
  double max_pause_time_ms() {
    return _mmu_tracker->max_gc_time() * 1000.0;
  }

  // Predicted duration (ms) of the next remark pause.
  double predict_remark_time_ms() {
    return get_new_prediction(_concurrent_mark_remark_times_ms);
  }

  // Predicted duration (ms) of the next cleanup pause.
  double predict_cleanup_time_ms() {
    return get_new_prediction(_concurrent_mark_cleanup_times_ms);
  }
  // the CSet. This is updated by the thread that adds a new region to
  // the CSet. See the comment for _inc_cset_recorded_rs_lengths about
  // MT-safety assumptions.
  double _inc_cset_predicted_elapsed_time_ms;

  // See the comment for _inc_cset_recorded_rs_lengths_diffs.
  double _inc_cset_predicted_elapsed_time_ms_diffs;

  // Stash a pointer to the g1 heap.
  G1CollectedHeap* _g1;

  G1GCPhaseTimes* _phase_times;

  // The ratio of gc time to elapsed time, computed over recent pauses.
  double _recent_avg_pause_time_ratio;

  double recent_avg_pause_time_ratio() {
    return _recent_avg_pause_time_ratio;
  }

  // At the end of a pause we check the heap occupancy and we decide
  // whether we will start a marking cycle during the next pause. If
  // we decide that we want to do that, we will set this parameter to
  // true. So, this parameter will stay true between the end of a
  // pause and the beginning of a subsequent pause (not necessarily
  // the next one, see the comments on the next field) when we decide
  // that we will indeed start a marking cycle and do the initial-mark
  // work.
  volatile bool _initiate_conc_mark_if_possible;

  // If initiate_conc_mark_if_possible() is set at the beginning of a
  // pause, it is a suggestion that the pause should start a marking
  // cycle by doing the initial-mark work. However, it is possible
  // that the concurrent marking thread is still finishing up the
  // previous marking cycle (e.g., clearing the next marking
  // bitmap). If that is the case we cannot start a new cycle and
  // we'll have to wait for the concurrent marking thread to finish
  // what it is doing. In this case we will postpone the marking cycle
  // initiation decision for the next pause. When we eventually decide
  // to start a cycle, we will set _during_initial_mark_pause which
  // will stay true until the end of the initial-mark pause and it's
  // the condition that indicates that a pause is doing the
  // initial-mark work.
  volatile bool _during_initial_mark_pause;

  bool _last_young_gc;

  // This set of variables tracks the collector efficiency, in order to
  // determine whether we should initiate a new marking.
  double _cur_mark_stop_world_time_ms;
  double _mark_remark_start_sec;
  double _mark_cleanup_start_sec;

  // Update the young list target length either by setting it to the
  // desired fixed value or by calculating it using G1's pause
  // prediction model. If no rs_lengths parameter is passed, predict
  // the RS lengths using the prediction model, otherwise use the
  // given rs_lengths as the prediction.
  void update_young_list_target_length(size_t rs_lengths = (size_t) -1);

  // Calculate and return the minimum desired young list target
  // length. This is the minimum desired young list length according
  // to the user's inputs.
  uint calculate_young_list_desired_min_length(uint base_min_length);

  // Calculate and return the maximum desired young list target
  // length. This is the maximum desired young list length according
  uint calc_min_old_cset_length();

  // Calculate the maximum number of old regions we'll add to the CSet
  // during a mixed GC.
  uint calc_max_old_cset_length();

  // Returns the given amount of uncollected reclaimable space
  // as a percentage of the current heap capacity.
  double reclaimable_bytes_perc(size_t reclaimable_bytes);

public:

  G1CollectorPolicy();

  virtual G1CollectorPolicy* as_g1_policy() { return this; }

  virtual CollectorPolicy::Name kind() {
    return CollectorPolicy::G1CollectorPolicyKind;
  }

  G1GCPhaseTimes* phase_times() const { return _phase_times; }

  // Check the current value of the young list RSet lengths and
  // compare it against the last prediction. If the current value is
  // higher, recalculate the young list target length prediction.
  void revise_young_list_target_length_if_necessary();

  // This should be called after the heap is resized.
  void record_new_heap_size(uint new_number_of_regions);

  void init();

  // Create jstat counters for the policy.
  virtual void initialize_gc_policy_counters();

  virtual HeapWord* mem_allocate_work(size_t size,
                                      bool is_tlab,
                                      bool* gc_overhead_limit_was_exceeded);

  // This method controls how a collector handles one or more
  // Update information about hr in the aggregated information for
  // the incrementally built collection set.
  void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);

private:
  // Update the incremental cset information when adding a region
  // (should not be called directly).
  void add_region_to_incremental_cset_common(HeapRegion* hr);

public:
  // Add hr to the LHS of the incremental collection set.
  void add_region_to_incremental_cset_lhs(HeapRegion* hr);

  // Add hr to the RHS of the incremental collection set.
  void add_region_to_incremental_cset_rhs(HeapRegion* hr);

#ifndef PRODUCT
  void print_collection_set(HeapRegion* list_head, outputStream* st);
#endif // !PRODUCT

  // Accessors for the concurrent-marking initiation request flag.
  bool initiate_conc_mark_if_possible()       { return _initiate_conc_mark_if_possible; }
  void set_initiate_conc_mark_if_possible()   { _initiate_conc_mark_if_possible = true; }
  void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }

  // Accessors for the "this pause does initial-mark work" flag.
  bool during_initial_mark_pause()      { return _during_initial_mark_pause; }
  void set_during_initial_mark_pause()  { _during_initial_mark_pause = true; }
  void clear_during_initial_mark_pause(){ _during_initial_mark_pause = false; }

  // This sets the initiate_conc_mark_if_possible() flag to start a
  // new cycle, as long as we are not already in one. It's best if it
  // is called during a safepoint when the test whether a cycle is in
  // progress or not is stable.
  bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);

  // This is called at the very beginning of an evacuation pause (it
  // has to be the first thing that the pause does). If
  // initiate_conc_mark_if_possible() is true, and the concurrent
  // marking thread has completed its work during the previous cycle,
  // it will set during_initial_mark_pause() to so that the pause does
  // the initial-mark work and start a marking cycle.
  void decide_on_conc_mark_initiation();

  // If an expansion would be appropriate, because recent GC overhead had
  // exceeded the desired limit, return an amount to expand by.
  virtual size_t expansion_amount();

  // Print tracing information.
  void print_tracing_info() const;
  // Print stats on young survival ratio
  void print_yg_surv_rate_info() const;
818
819 void finished_recalculating_age_indexes(bool is_survivors) {
820 if (is_survivors) {
821 _survivor_surv_rate_group->finished_recalculating_age_indexes();
822 } else {
823 _short_lived_surv_rate_group->finished_recalculating_age_indexes();
824 }
825 // do that for any other surv rate groups
826 }
827
  // Current young list target length, in regions.
  size_t young_list_target_length() const { return _young_list_target_length; }

  bool is_young_list_full();

  bool can_expand_young_list();

  // Upper bound on young length while the GC locker is active.
  uint young_list_max_length() {
    return _young_list_max_length;
  }

  // True while in young-only (as opposed to mixed) GC mode.
  bool gcs_are_young() {
    return _gcs_are_young;
  }
  void set_gcs_are_young(bool gcs_are_young) {
    _gcs_are_young = gcs_are_young;
  }

  // Delegates to the young gen sizer's adaptive-sizing flag.
  bool adaptive_young_list_length() {
    return _young_gen_sizer->adaptive_young_list_length();
  }

private:
  //
  // Survivor regions policy.
  //

  // Current tenuring threshold, set to 0 if the collector reaches the
  // maximum amount of survivors regions.
  uint _tenuring_threshold;

  // The limit on the number of regions allocated for survivors.
  uint _max_survivor_regions;

  // For reporting purposes.
  // The value of _heap_bytes_before_gc is also used to calculate
|
1 /*
2 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
  // Recompute min/max young lengths for the given heap size; results are
  // written through the two out-parameters.
  void recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length);

public:
  G1YoungGenSizer();
  // Calculate the maximum length of the young gen given the number of regions
  // depending on the sizing algorithm.
  uint max_young_length(uint number_of_heap_regions);

  // Re-derive the desired young lengths after the heap has been resized.
  void heap_size_changed(uint new_number_of_heap_regions);
  // Minimum desired young length, in regions.
  uint min_desired_young_length() {
    return _min_desired_young_length;
  }
  // Maximum desired young length, in regions.
  uint max_desired_young_length() {
    return _max_desired_young_length;
  }
  // Whether the young gen size is adapted by the policy (_adaptive_size)
  // rather than fixed by user settings.
  bool adaptive_young_list_length() {
    return _adaptive_size;
  }
};
161
// G1CollectorState bundles the boolean phase flags of the G1 collection
// cycle into a single passive value object, with trivial setters/getters
// and a few composite predicates. It carries no behavior of its own.
class G1CollectorState VALUE_OBJ_CLASS_SPEC {
  // Various boolean state variables that indicate
  // the phase of the G1 collection.
  bool _in_young_gc_mode;
  // indicates whether we are in full young or partially young GC mode
  bool _gcs_are_young;
  // Whether the most recent collection was young (vs. mixed).
  bool _last_gc_was_young;
  // NOTE(review): presumably flags the final young-only GC before a
  // mixed phase -- confirm against callers.
  bool _last_young_gc;

  // If initiate_conc_mark_if_possible() is set at the beginning of a
  // pause, it is a suggestion that the pause should start a marking
  // cycle by doing the initial-mark work. However, it is possible
  // that the concurrent marking thread is still finishing up the
  // previous marking cycle (e.g., clearing the next marking
  // bitmap). If that is the case we cannot start a new cycle and
  // we'll have to wait for the concurrent marking thread to finish
  // what it is doing. In this case we will postpone the marking cycle
  // initiation decision for the next pause. When we eventually decide
  // to start a cycle, we will set _during_initial_mark_pause which
  // will stay true until the end of the initial-mark pause and it's
  // the condition that indicates that a pause is doing the
  // initial-mark work.
  volatile bool _during_initial_mark_pause;

  // At the end of a pause we check the heap occupancy and we decide
  // whether we will start a marking cycle during the next pause. If
  // we decide that we want to do that, we will set this parameter to
  // true. So, this parameter will stay true between the end of a
  // pause and the beginning of a subsequent pause (not necessarily
  // the next one, see the comments on the next field) when we decide
  // that we will indeed start a marking cycle and do the initial-mark
  // work.
  volatile bool _initiate_conc_mark_if_possible;

  // NOTE: if some of these are synonyms for others,
  // the redundant fields should be eliminated. XXX
  bool _during_marking;
  bool _mark_in_progress;
  bool _in_marking_window;
  bool _in_marking_window_im;

public:
  // All flags start cleared except _gcs_are_young: we begin in
  // young-only GC mode.
  G1CollectorState() :
    _in_young_gc_mode(false),
    _gcs_are_young(true),
    _last_gc_was_young(false),
    _last_young_gc(false),

    _during_initial_mark_pause(false),
    _initiate_conc_mark_if_possible(false),

    _during_marking(false),
    _mark_in_progress(false),
    _in_marking_window(false),
    _in_marking_window_im(false) {}


  // Setters
  void set_in_young_gc_mode(bool v) { _in_young_gc_mode = v; }
  void set_gcs_are_young(bool v) { _gcs_are_young = v; }
  void set_last_gc_was_young(bool v) { _last_gc_was_young = v; }
  void set_last_young_gc(bool v) { _last_young_gc = v; }
  void set_during_initial_mark_pause(bool v) { _during_initial_mark_pause = v; }
  void set_initiate_conc_mark_if_possible(bool v) { _initiate_conc_mark_if_possible = v; }
  void set_during_marking(bool v) { _during_marking = v; }
  void set_mark_in_progress(bool v) { _mark_in_progress = v; }
  void set_in_marking_window(bool v) { _in_marking_window = v; }
  void set_in_marking_window_im(bool v) { _in_marking_window_im = v; }

  // Puns
  ////////
  void set_marking_complete() { set_mark_in_progress(false); }
  void set_marking_started() { set_mark_in_progress(true); }

  // Getters
  bool in_young_gc_mode() { return _in_young_gc_mode; }
  bool gcs_are_young() { return _gcs_are_young; }
  bool last_gc_was_young() { return _last_gc_was_young; }
  bool last_young_gc() { return _last_young_gc; }
  bool during_initial_mark_pause() { return _during_initial_mark_pause; }
  bool initiate_conc_mark_if_possible() { return _initiate_conc_mark_if_possible; }
  bool during_marking() { return _during_marking; }
  bool mark_in_progress() { return _mark_in_progress; }
  bool in_marking_window() { return _in_marking_window; }
  bool in_marking_window_im() { return _in_marking_window_im; }


  // Composite booleans (clients worry about flickering)

  // In the marking window but not in the initial-mark pause itself,
  // i.e. concurrent marking may be running.
  bool during_concurrent_mark() {
    return (_in_marking_window && !_in_marking_window_im);
  }

  bool should_propagate() { // XXX should have a more suitable state name or abstraction for this
    return (_last_young_gc && !_in_marking_window);
  }
};
258
class G1CollectorPolicy: public CollectorPolicy {
private:
  // either equal to the number of parallel threads, if ParallelGCThreads
  // has been set, or 1 otherwise
  int _parallel_gc_threads;

  // The number of GC threads currently active.
  uintx _no_of_gc_threads;

  enum SomePrivateConstants {
    // Number of recent pauses kept for pause-time heuristics.
    NumPrevPausesForHeuristics = 10
  };

  // Tracks pause times against the MMU (minimum mutator utilization) goal.
  G1MMUTracker* _mmu_tracker;

  void initialize_alignments();
  void initialize_flags();

  // Selects old-region candidates for mixed collections.
  CollectionSetChooser* _collectionSetChooser;

  double _full_collection_start_sec;
  uint   _cur_collection_pause_used_regions_at_start;

  // These exclude marking times.
  TruncatedSeq* _recent_gc_times_ms;

  TruncatedSeq* _concurrent_mark_remark_times_ms;
  TruncatedSeq* _concurrent_mark_cleanup_times_ms;

  TraceYoungGenTimeData _trace_young_gen_time_data;
  TraceOldGenTimeData   _trace_old_gen_time_data;

  double _stop_world_start;

  // indicates whether we are in young or mixed GC mode
  G1CollectorState _collector_state;

  uint _young_list_target_length;
  uint _young_list_fixed_length;

  // The max number of regions we can extend the eden by while the GC
  // locker is active. This should be >= _young_list_target_length;
  uint _young_list_max_length;

  SurvRateGroup* _short_lived_surv_rate_group;
  SurvRateGroup* _survivor_surv_rate_group;
  // add here any more surv rate groups

  // Target GC overhead as a percentage of total time.
  double _gc_overhead_perc;

  // Fraction/number of regions kept back as a to-space reserve.
  double _reserve_factor;
  uint   _reserve_regions;

  enum PredictionConstants {
    // Sample window length for the prediction sequences below.
    TruncatedSeqLength = 10
  };

  TruncatedSeq* _alloc_rate_ms_seq;
  double        _prev_collection_pause_end_ms;

  // Sample sequences feeding G1's pause-time prediction model.
  TruncatedSeq* _rs_length_diff_seq;
  TruncatedSeq* _cost_per_card_ms_seq;
  TruncatedSeq* _young_cards_per_entry_ratio_seq;
  TruncatedSeq* _mixed_cards_per_entry_ratio_seq;
  TruncatedSeq* _cost_per_entry_ms_seq;
  TruncatedSeq* _mixed_cost_per_entry_ms_seq;
  TruncatedSeq* _cost_per_byte_ms_seq;
  TruncatedSeq* _constant_other_time_ms_seq;
  TruncatedSeq* _young_other_cost_per_region_ms_seq;
  TruncatedSeq* _non_young_other_cost_per_region_ms_seq;

  TruncatedSeq* _pending_cards_seq;
  TruncatedSeq* _rs_lengths_seq;
433
434 double predict_mixed_cards_per_entry_ratio() {
435 if (_mixed_cards_per_entry_ratio_seq->num() < 2) {
436 return predict_young_cards_per_entry_ratio();
437 } else {
438 return get_new_prediction(_mixed_cards_per_entry_ratio_seq);
439 }
440 }
441
  // Predicted number of cards to scan for a young-GC remembered set of
  // rs_length entries (entries scaled by the predicted ratio).
  size_t predict_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_young_cards_per_entry_ratio());
  }

  // Same as above but using the mixed-GC cards-per-entry ratio.
  size_t predict_non_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_mixed_cards_per_entry_ratio());
  }
451
452 double predict_rs_scan_time_ms(size_t card_num) {
453 if (collector_state()->gcs_are_young()) {
454 return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
455 } else {
456 return predict_mixed_rs_scan_time_ms(card_num);
457 }
458 }
459
460 double predict_mixed_rs_scan_time_ms(size_t card_num) {
461 if (_mixed_cost_per_entry_ms_seq->num() < 3) {
462 return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
463 } else {
464 return (double) (card_num *
465 get_new_prediction(_mixed_cost_per_entry_ms_seq));
466 }
467 }
468
469 double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
470 if (_cost_per_byte_ms_during_cm_seq->num() < 3) {
471 return (1.1 * (double) bytes_to_copy) *
472 get_new_prediction(_cost_per_byte_ms_seq);
473 } else {
474 return (double) bytes_to_copy *
475 get_new_prediction(_cost_per_byte_ms_during_cm_seq);
476 }
477 }
478
479 double predict_object_copy_time_ms(size_t bytes_to_copy) {
480 if (collector_state()->during_concurrent_mark()) {
481 return predict_object_copy_time_ms_during_cm(bytes_to_copy);
482 } else {
483 return (double) bytes_to_copy *
484 get_new_prediction(_cost_per_byte_ms_seq);
485 }
486 }
487
  // Predicted fixed per-pause overhead (ms).
  double predict_constant_other_time_ms() {
    return get_new_prediction(_constant_other_time_ms_seq);
  }

  // Predicted per-region "other" overhead (ms) for young_num young regions.
  double predict_young_other_time_ms(size_t young_num) {
    return (double) young_num *
           get_new_prediction(_young_other_cost_per_region_ms_seq);
  }

  // Predicted per-region "other" overhead (ms) for non_young_num old regions.
  double predict_non_young_other_time_ms(size_t non_young_num) {
    return (double) non_young_num *
           get_new_prediction(_non_young_other_cost_per_region_ms_seq);
  }
501
  // Predicted fixed pause cost (ms) given the pending cards (and,
  // in the second overload, the cards to be scanned).
  double predict_base_elapsed_time_ms(size_t pending_cards);
  double predict_base_elapsed_time_ms(size_t pending_cards,
                                      size_t scanned_cards);
  // Predicted number of bytes that will survive evacuation of hr.
  size_t predict_bytes_to_copy(HeapRegion* hr);
  double predict_region_elapsed_time_ms(HeapRegion* hr, bool for_young_gc);

  void set_recorded_rs_lengths(size_t rs_lengths);

  // Collection-set length accessors, in regions.
  uint cset_region_length() { return young_cset_region_length() +
                                     old_cset_region_length(); }
  uint young_cset_region_length() { return eden_cset_region_length() +
                                           survivor_cset_region_length(); }

  double predict_survivor_regions_evac_time();
516
517 void cset_regions_freed() {
518 bool propagate = collector_state()->should_propagate();
519 _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
520 _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
521 // also call it on any more surv rate groups
522 }
523
  G1MMUTracker* mmu_tracker() {
    return _mmu_tracker;
  }

  // Pause time goal in ms (max_gc_time() is presumably in seconds,
  // hence the * 1000.0 -- confirm against G1MMUTracker).
  double max_pause_time_ms() {
    return _mmu_tracker->max_gc_time() * 1000.0;
  }

  // Predicted duration (ms) of the next remark pause.
  double predict_remark_time_ms() {
    return get_new_prediction(_concurrent_mark_remark_times_ms);
  }

  // Predicted duration (ms) of the next cleanup pause.
  double predict_cleanup_time_ms() {
    return get_new_prediction(_concurrent_mark_cleanup_times_ms);
  }
  // the CSet. This is updated by the thread that adds a new region to
  // the CSet. See the comment for _inc_cset_recorded_rs_lengths about
  // MT-safety assumptions.
  double _inc_cset_predicted_elapsed_time_ms;

  // See the comment for _inc_cset_recorded_rs_lengths_diffs.
  double _inc_cset_predicted_elapsed_time_ms_diffs;

  // Stash a pointer to the g1 heap.
  G1CollectedHeap* _g1;

  G1GCPhaseTimes* _phase_times;

  // The ratio of gc time to elapsed time, computed over recent pauses.
  double _recent_avg_pause_time_ratio;

  double recent_avg_pause_time_ratio() {
    return _recent_avg_pause_time_ratio;
  }

  // This set of variables tracks the collector efficiency, in order to
  // determine whether we should initiate a new marking.
  double _cur_mark_stop_world_time_ms;
  double _mark_remark_start_sec;
  double _mark_cleanup_start_sec;

  // Update the young list target length either by setting it to the
  // desired fixed value or by calculating it using G1's pause
  // prediction model. If no rs_lengths parameter is passed, predict
  // the RS lengths using the prediction model, otherwise use the
  // given rs_lengths as the prediction.
  void update_young_list_target_length(size_t rs_lengths = (size_t) -1);

  // Calculate and return the minimum desired young list target
  // length. This is the minimum desired young list length according
  // to the user's inputs.
  uint calculate_young_list_desired_min_length(uint base_min_length);

  // Calculate and return the maximum desired young list target
  // length. This is the maximum desired young list length according
  uint calc_min_old_cset_length();

  // Calculate the maximum number of old regions we'll add to the CSet
  // during a mixed GC.
  uint calc_max_old_cset_length();

  // Returns the given amount of uncollected reclaimable space
  // as a percentage of the current heap capacity.
  double reclaimable_bytes_perc(size_t reclaimable_bytes);

public:

  G1CollectorPolicy();

  virtual G1CollectorPolicy* as_g1_policy() { return this; }

  virtual CollectorPolicy::Name kind() {
    return CollectorPolicy::G1CollectorPolicyKind;
  }

  // Access to the bundled collection-phase flags.
  G1CollectorState* collector_state() { return &_collector_state; }

  G1GCPhaseTimes* phase_times() const { return _phase_times; }

  // Check the current value of the young list RSet lengths and
  // compare it against the last prediction. If the current value is
  // higher, recalculate the young list target length prediction.
  void revise_young_list_target_length_if_necessary();

  // This should be called after the heap is resized.
  void record_new_heap_size(uint new_number_of_regions);

  void init();

  // Create jstat counters for the policy.
  virtual void initialize_gc_policy_counters();

  virtual HeapWord* mem_allocate_work(size_t size,
                                      bool is_tlab,
                                      bool* gc_overhead_limit_was_exceeded);

  // This method controls how a collector handles one or more
  // Update information about hr in the aggregated information for
  // the incrementally built collection set.
  void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);

private:
  // Update the incremental cset information when adding a region
  // (should not be called directly).
  void add_region_to_incremental_cset_common(HeapRegion* hr);

public:
  // Add hr to the LHS of the incremental collection set.
  void add_region_to_incremental_cset_lhs(HeapRegion* hr);

  // Add hr to the RHS of the incremental collection set.
  void add_region_to_incremental_cset_rhs(HeapRegion* hr);

#ifndef PRODUCT
  void print_collection_set(HeapRegion* list_head, outputStream* st);
#endif // !PRODUCT

  // This sets the initiate_conc_mark_if_possible() flag to start a
  // new cycle, as long as we are not already in one. It's best if it
  // is called during a safepoint when the test whether a cycle is in
  // progress or not is stable.
  bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);

  // This is called at the very beginning of an evacuation pause (it
  // has to be the first thing that the pause does). If
  // initiate_conc_mark_if_possible() is true, and the concurrent
  // marking thread has completed its work during the previous cycle,
  // it will set during_initial_mark_pause() to so that the pause does
  // the initial-mark work and start a marking cycle.
  void decide_on_conc_mark_initiation();

  // If an expansion would be appropriate, because recent GC overhead had
  // exceeded the desired limit, return an amount to expand by.
  virtual size_t expansion_amount();

  // Print tracing information.
  void print_tracing_info() const;
  // Print stats on young survival ratio
  void print_yg_surv_rate_info() const;
872
873 void finished_recalculating_age_indexes(bool is_survivors) {
874 if (is_survivors) {
875 _survivor_surv_rate_group->finished_recalculating_age_indexes();
876 } else {
877 _short_lived_surv_rate_group->finished_recalculating_age_indexes();
878 }
879 // do that for any other surv rate groups
880 }
881
  // Current young list target length, in regions.
  size_t young_list_target_length() const { return _young_list_target_length; }

  bool is_young_list_full();

  bool can_expand_young_list();

  // Upper bound on young length while the GC locker is active.
  uint young_list_max_length() {
    return _young_list_max_length;
  }

  // Delegates to the young gen sizer's adaptive-sizing flag.
  bool adaptive_young_list_length() {
    return _young_gen_sizer->adaptive_young_list_length();
  }

private:
  //
  // Survivor regions policy.
  //

  // Current tenuring threshold, set to 0 if the collector reaches the
  // maximum amount of survivors regions.
  uint _tenuring_threshold;

  // The limit on the number of regions allocated for survivors.
  uint _max_survivor_regions;

  // For reporting purposes.
  // The value of _heap_bytes_before_gc is also used to calculate
|