
src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp

*** 1,7 ****
  /*
!  * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
--- 1,7 ----
  /*
!  * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
*** 157,166 **** --- 157,263 ----
    bool adaptive_young_list_length() {
      return _adaptive_size;
    }
  };
  
+ class G1CollectorState VALUE_OBJ_CLASS_SPEC {
+   // Various boolean state variables that indicate
+   // the phase of the G1 collection.
+   bool _in_young_gc_mode;
+   // indicates whether we are in full young or partially young GC mode
+   bool _gcs_are_young;
+   bool _last_gc_was_young;
+   bool _last_young_gc;
+ 
+   // If initiate_conc_mark_if_possible() is set at the beginning of a
+   // pause, it is a suggestion that the pause should start a marking
+   // cycle by doing the initial-mark work. However, it is possible
+   // that the concurrent marking thread is still finishing up the
+   // previous marking cycle (e.g., clearing the next marking
+   // bitmap). If that is the case we cannot start a new cycle and
+   // we'll have to wait for the concurrent marking thread to finish
+   // what it is doing. In this case we will postpone the marking cycle
+   // initiation decision for the next pause. When we eventually decide
+   // to start a cycle, we will set _during_initial_mark_pause which
+   // will stay true until the end of the initial-mark pause and it's
+   // the condition that indicates that a pause is doing the
+   // initial-mark work.
+   volatile bool _during_initial_mark_pause;
+ 
+   // At the end of a pause we check the heap occupancy and we decide
+   // whether we will start a marking cycle during the next pause. If
+   // we decide that we want to do that, we will set this parameter to
+   // true. So, this parameter will stay true between the end of a
+   // pause and the beginning of a subsequent pause (not necessarily
+   // the next one, see the comments on the next field) when we decide
+   // that we will indeed start a marking cycle and do the initial-mark
+   // work.
+   volatile bool _initiate_conc_mark_if_possible;
+ 
+   // NOTE: if some of these are synonyms for others,
+   // the redundant fields should be eliminated. XXX
+   bool _during_marking;
+   bool _mark_in_progress;
+   bool _in_marking_window;
+   bool _in_marking_window_im;
+ 
+ public:
+   G1CollectorState() :
+     _in_young_gc_mode(false),
+     _gcs_are_young(true),
+     _last_gc_was_young(false),
+     _last_young_gc(false),
+ 
+     _during_initial_mark_pause(false),
+     _initiate_conc_mark_if_possible(false),
+ 
+     _during_marking(false),
+     _mark_in_progress(false),
+     _in_marking_window(false),
+     _in_marking_window_im(false) {}
+ 
+ 
+   // Setters
+   void set_in_young_gc_mode(bool v) { _in_young_gc_mode = v; }
+   void set_gcs_are_young(bool v) { _gcs_are_young = v; }
+   void set_last_gc_was_young(bool v) { _last_gc_was_young = v; }
+   void set_last_young_gc(bool v) { _last_young_gc = v; }
+   void set_during_initial_mark_pause(bool v) { _during_initial_mark_pause = v; }
+   void set_initiate_conc_mark_if_possible(bool v) { _initiate_conc_mark_if_possible = v; }
+   void set_during_marking(bool v) { _during_marking = v; }
+   void set_mark_in_progress(bool v) { _mark_in_progress = v; }
+   void set_in_marking_window(bool v) { _in_marking_window = v; }
+   void set_in_marking_window_im(bool v) { _in_marking_window_im = v; }
+ 
+   // Puns
+   ////////
+   void set_marking_complete() { set_mark_in_progress(false); }
+   void set_marking_started() { set_mark_in_progress(true); }
+ 
+   // Getters
+   bool in_young_gc_mode() { return _in_young_gc_mode; }
+   bool gcs_are_young() { return _gcs_are_young; }
+   bool last_gc_was_young() { return _last_gc_was_young; }
+   bool last_young_gc() { return _last_young_gc; }
+   bool during_initial_mark_pause() { return _during_initial_mark_pause; }
+   bool initiate_conc_mark_if_possible() { return _initiate_conc_mark_if_possible; }
+   bool during_marking() { return _during_marking; }
+   bool mark_in_progress() { return _mark_in_progress; }
+   bool in_marking_window() { return _in_marking_window; }
+   bool in_marking_window_im() { return _in_marking_window_im; }
+ 
+ 
+   // Composite booleans (clients worry about flickering)
+   bool during_concurrent_mark() {
+     return (_in_marking_window && !_in_marking_window_im);
+   }
+ 
+   bool should_propagate() { // XXX should have a more suitable state name or abstraction for this
+     return (_last_young_gc && !_in_marking_window);
+   }
+ };
+ 
  class G1CollectorPolicy: public CollectorPolicy {
  private:
    // either equal to the number of parallel threads, if ParallelGCThreads
    // has been set, or 1 otherwise
    int _parallel_gc_threads;
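The new G1CollectorState value object gathers the per-pause booleans that were previously scattered through G1CollectorPolicy, and names the two derived conditions (during_concurrent_mark(), should_propagate()) once instead of having call sites re-derive them. Below is a minimal standalone sketch of that pattern, not the HotSpot code: VALUE_OBJ_CLASS_SPEC, the volatile flags, and most fields are omitted, and the class name is a stand-in.

    #include <cstdio>

    class CollectorStateSketch {
      bool _last_young_gc;
      bool _in_marking_window;
      bool _in_marking_window_im;
     public:
      CollectorStateSketch()
        : _last_young_gc(false),
          _in_marking_window(false),
          _in_marking_window_im(false) {}

      void set_last_young_gc(bool v)        { _last_young_gc = v; }
      void set_in_marking_window(bool v)    { _in_marking_window = v; }
      void set_in_marking_window_im(bool v) { _in_marking_window_im = v; }

      // Composite predicates, mirroring the ones defined in the hunk above.
      bool during_concurrent_mark() const {
        return _in_marking_window && !_in_marking_window_im;
      }
      bool should_propagate() const {
        return _last_young_gc && !_in_marking_window;
      }
    };

    int main() {
      CollectorStateSketch s;
      s.set_in_marking_window(true);     // a marking window is open...
      s.set_in_marking_window_im(false); // ...but this is not the initial-mark pause
      std::printf("during_concurrent_mark: %d\n", s.during_concurrent_mark()); // 1
      std::printf("should_propagate: %d\n", s.should_propagate());             // 0
      return 0;
    }

The later hunks in this file show the other half of the refactoring: the same conjunctions, previously written out inline, are replaced by calls to these named predicates.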
*** 192,229 ****
    TraceOldGenTimeData _trace_old_gen_time_data;
  
    double _stop_world_start;
  
    // indicates whether we are in young or mixed GC mode
!   bool _gcs_are_young;
  
    uint _young_list_target_length;
    uint _young_list_fixed_length;
  
    // The max number of regions we can extend the eden by while the GC
    // locker is active. This should be >= _young_list_target_length;
    uint _young_list_max_length;
  
-   bool _last_gc_was_young;
- 
-   bool _during_marking;
-   bool _in_marking_window;
-   bool _in_marking_window_im;
- 
    SurvRateGroup* _short_lived_surv_rate_group;
    SurvRateGroup* _survivor_surv_rate_group;
    // add here any more surv rate groups
  
    double _gc_overhead_perc;
  
    double _reserve_factor;
    uint _reserve_regions;
  
-   bool during_marking() {
-     return _during_marking;
-   }
- 
    enum PredictionConstants {
      TruncatedSeqLength = 10
    };
  
    TruncatedSeq* _alloc_rate_ms_seq;
--- 289,316 ----
    TraceOldGenTimeData _trace_old_gen_time_data;
  
    double _stop_world_start;
  
    // indicates whether we are in young or mixed GC mode
!   G1CollectorState _collector_state;
  
    uint _young_list_target_length;
    uint _young_list_fixed_length;
  
    // The max number of regions we can extend the eden by while the GC
    // locker is active. This should be >= _young_list_target_length;
    uint _young_list_max_length;
  
    SurvRateGroup* _short_lived_surv_rate_group;
    SurvRateGroup* _survivor_surv_rate_group;
    // add here any more surv rate groups
  
    double _gc_overhead_perc;
  
    double _reserve_factor;
    uint _reserve_regions;
  
    enum PredictionConstants {
      TruncatedSeqLength = 10
    };
  
    TruncatedSeq* _alloc_rate_ms_seq;
*** 361,371 ****
      return (size_t) ((double) rs_length * predict_mixed_cards_per_entry_ratio());
    }
  
    double predict_rs_scan_time_ms(size_t card_num) {
!     if (gcs_are_young()) {
        return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
      } else {
        return predict_mixed_rs_scan_time_ms(card_num);
      }
    }
--- 448,458 ----
      return (size_t) ((double) rs_length * predict_mixed_cards_per_entry_ratio());
    }
  
    double predict_rs_scan_time_ms(size_t card_num) {
!     if (collector_state()->gcs_are_young()) {
        return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
      } else {
        return predict_mixed_rs_scan_time_ms(card_num);
      }
    }
--- 448,458 ----
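The only change in this hunk is that the young-versus-mixed mode test moves behind the state object; the prediction itself is untouched: a per-card cost taken from one of two sampled sequences, scaled by the card count. A hedged standalone sketch of that dispatch follows, with invented placeholder costs standing in for the TruncatedSeq predictions (the names here are stand-ins, not G1's).

    #include <cstddef>
    #include <cstdio>

    struct RsScanPredictorSketch {
      bool gcs_are_young;              // the mode bit queried above
      double young_cost_per_card_ms;   // stands in for _cost_per_entry_ms_seq
      double mixed_cost_per_card_ms;   // stands in for the mixed-mode sequence

      double predict_rs_scan_time_ms(std::size_t card_num) const {
        // Same branch shape as the hunk above: one cost model per mode.
        double per_card = gcs_are_young ? young_cost_per_card_ms
                                        : mixed_cost_per_card_ms;
        return static_cast<double>(card_num) * per_card;
      }
    };

    int main() {
      RsScanPredictorSketch p = { true, 0.001, 0.003 };
      std::printf("%f\n", p.predict_rs_scan_time_ms(10000)); // 10.0 ms
      return 0;
    }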
*** 388,398 ****
        get_new_prediction(_cost_per_byte_ms_during_cm_seq);
      }
    }
  
    double predict_object_copy_time_ms(size_t bytes_to_copy) {
!     if (_in_marking_window && !_in_marking_window_im) {
        return predict_object_copy_time_ms_during_cm(bytes_to_copy);
      } else {
        return (double) bytes_to_copy * get_new_prediction(_cost_per_byte_ms_seq);
      }
--- 475,485 ----
        get_new_prediction(_cost_per_byte_ms_during_cm_seq);
      }
    }
  
    double predict_object_copy_time_ms(size_t bytes_to_copy) {
!     if (collector_state()->during_concurrent_mark()) {
        return predict_object_copy_time_ms_during_cm(bytes_to_copy);
      } else {
        return (double) bytes_to_copy * get_new_prediction(_cost_per_byte_ms_seq);
      }
--- 475,485 ----
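Before this change the copy-cost predictor spelled out the conjunction _in_marking_window && !_in_marking_window_im inline; afterwards it asks the state object the named question. A tiny standalone enumeration of that composite, assuming it behaves exactly as defined in the G1CollectorState hunk above:

    #include <cstdio>

    // The "during concurrent mark" cost model applies only when a marking
    // window is open and the pause is not itself the initial-mark pause.
    int main() {
      for (int window = 0; window <= 1; window++) {
        for (int window_im = 0; window_im <= 1; window_im++) {
          bool during_cm = window && !window_im;
          std::printf("in_marking_window=%d in_marking_window_im=%d -> "
                      "during_concurrent_mark=%d\n",
                      window, window_im, during_cm);
        }
      }
      return 0;
    }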
*** 426,436 ****
           survivor_cset_region_length();
    }
  
    double predict_survivor_regions_evac_time();
  
    void cset_regions_freed() {
!     bool propagate = _last_gc_was_young && !_in_marking_window;
      _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
      _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
      // also call it on any more surv rate groups
    }
--- 513,523 ----
           survivor_cset_region_length();
    }
  
    double predict_survivor_regions_evac_time();
  
    void cset_regions_freed() {
!     bool propagate = collector_state()->should_propagate();
      _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
      _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
      // also call it on any more surv rate groups
    }
--- 513,523 ----
*** 550,586 ****
    double recent_avg_pause_time_ratio() {
      return _recent_avg_pause_time_ratio;
    }
  
-   // At the end of a pause we check the heap occupancy and we decide
-   // whether we will start a marking cycle during the next pause. If
-   // we decide that we want to do that, we will set this parameter to
-   // true. So, this parameter will stay true between the end of a
-   // pause and the beginning of a subsequent pause (not necessarily
-   // the next one, see the comments on the next field) when we decide
-   // that we will indeed start a marking cycle and do the initial-mark
-   // work.
-   volatile bool _initiate_conc_mark_if_possible;
- 
-   // If initiate_conc_mark_if_possible() is set at the beginning of a
-   // pause, it is a suggestion that the pause should start a marking
-   // cycle by doing the initial-mark work. However, it is possible
-   // that the concurrent marking thread is still finishing up the
-   // previous marking cycle (e.g., clearing the next marking
-   // bitmap). If that is the case we cannot start a new cycle and
-   // we'll have to wait for the concurrent marking thread to finish
-   // what it is doing. In this case we will postpone the marking cycle
-   // initiation decision for the next pause. When we eventually decide
-   // to start a cycle, we will set _during_initial_mark_pause which
-   // will stay true until the end of the initial-mark pause and it's
-   // the condition that indicates that a pause is doing the
-   // initial-mark work.
-   volatile bool _during_initial_mark_pause;
- 
-   bool _last_young_gc;
- 
    // This set of variables tracks the collector efficiency, in order to
    // determine whether we should initiate a new marking.
    double _cur_mark_stop_world_time_ms;
    double _mark_remark_start_sec;
    double _mark_cleanup_start_sec;
--- 637,646 ----
*** 645,654 **** --- 705,716 ----
    virtual CollectorPolicy::Name kind() {
      return CollectorPolicy::G1CollectorPolicyKind;
    }
  
+   G1CollectorState* collector_state() { return &_collector_state; }
+ 
    G1GCPhaseTimes* phase_times() const { return _phase_times; }
  
    // Check the current value of the young list RSet lengths and
    // compare it against the last prediction. If the current value is
    // higher, recalculate the young list target length prediction.
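The policy embeds the state object by value and hands out a pointer, so every caller mutates the single shared instance rather than a copy. A minimal sketch of that accessor shape, with stand-in names rather than the HotSpot ones:

    #include <cstdio>

    class StateSketch {
      bool _gcs_are_young;
     public:
      StateSketch() : _gcs_are_young(true) {}
      bool gcs_are_young()           { return _gcs_are_young; }
      void set_gcs_are_young(bool v) { _gcs_are_young = v; }
    };

    class PolicySketch {
      StateSketch _collector_state;                          // embedded by value
     public:
      StateSketch* collector_state() { return &_collector_state; }
    };

    int main() {
      PolicySketch policy;
      policy.collector_state()->set_gcs_are_young(false);    // switch to mixed mode
      std::printf("%d\n", policy.collector_state()->gcs_are_young()); // 0
      return 0;
    }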
*** 782,799 ****
  #ifndef PRODUCT
    void print_collection_set(HeapRegion* list_head, outputStream* st);
  #endif // !PRODUCT
  
-   bool initiate_conc_mark_if_possible()       { return _initiate_conc_mark_if_possible; }
-   void set_initiate_conc_mark_if_possible()   { _initiate_conc_mark_if_possible = true; }
-   void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }
- 
-   bool during_initial_mark_pause()       { return _during_initial_mark_pause; }
-   void set_during_initial_mark_pause()   { _during_initial_mark_pause = true; }
-   void clear_during_initial_mark_pause() { _during_initial_mark_pause = false; }
- 
    // This sets the initiate_conc_mark_if_possible() flag to start a
    // new cycle, as long as we are not already in one. It's best if it
    // is called during a safepoint when the test whether a cycle is in
    // progress or not is stable.
    bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);
--- 844,853 ----
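Each removed set_/clear_ pair collapses into the single bool-taking setter on G1CollectorState; the call-site migration presumably happens in the .cpp files, which are not part of this page. A small sketch of the API shape change, with hypothetical names in place of the real flags:

    #include <cassert>

    struct OldStyleFlag {
      volatile bool _flag;
      void set_flag()   { _flag = true;  }  // was e.g. set_during_initial_mark_pause()
      void clear_flag() { _flag = false; }  // was e.g. clear_during_initial_mark_pause()
    };

    struct NewStyleFlag {
      volatile bool _flag;
      void set_flag(bool v) { _flag = v; }  // one entry point, value passed in
    };

    int main() {
      OldStyleFlag o = { false };
      o.set_flag();
      o.clear_flag();
      NewStyleFlag n = { false };
      n.set_flag(true);                     // same effect as set_flag()
      n.set_flag(false);                    // same effect as clear_flag()
      assert(!o._flag && !n._flag);
      return 0;
    }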
*** 833,849 ****
    uint young_list_max_length() {
      return _young_list_max_length;
    }
  
-   bool gcs_are_young() {
-     return _gcs_are_young;
-   }
-   void set_gcs_are_young(bool gcs_are_young) {
-     _gcs_are_young = gcs_are_young;
-   }
- 
    bool adaptive_young_list_length() {
      return _young_gen_sizer->adaptive_young_list_length();
    }
  
  private:
--- 887,896 ----