< prev index next >

src/share/vm/gc/g1/g1CollectorPolicy.hpp

Print this page




  27 
  28 #include "gc/g1/collectionSetChooser.hpp"
  29 #include "gc/g1/g1CollectorState.hpp"
  30 #include "gc/g1/g1GCPhaseTimes.hpp"
  31 #include "gc/g1/g1InCSetState.hpp"
  32 #include "gc/g1/g1InitialMarkToMixedTimeTracker.hpp"
  33 #include "gc/g1/g1MMUTracker.hpp"
  34 #include "gc/g1/g1Predictions.hpp"
  35 #include "gc/shared/collectorPolicy.hpp"
  36 #include "utilities/pair.hpp"
  37 
  38 // A G1CollectorPolicy makes policy decisions that determine the
  39 // characteristics of the collector.  Examples include:
  40 //   * choice of collection set.
  41 //   * when to collect.
  42 
  43 class HeapRegion;
  44 class CollectionSetChooser;
  45 class G1IHOPControl;
  46 
  47 // TraceYoungGenTime collects data on _both_ young and mixed evacuation pauses
  48 // (the latter may contain non-young regions - i.e. regions that are
  49 // technically in old) while TraceOldGenTime collects data about full GCs.
  50 class TraceYoungGenTimeData : public CHeapObj<mtGC> {
  51  private:
  52   unsigned  _young_pause_num;
  53   unsigned  _mixed_pause_num;
  54 
  55   NumberSeq _all_stop_world_times_ms;
  56   NumberSeq _all_yield_times_ms;
  57 
  58   NumberSeq _total;
  59   NumberSeq _other;
  60   NumberSeq _root_region_scan_wait;
  61   NumberSeq _parallel;
  62   NumberSeq _ext_root_scan;
  63   NumberSeq _satb_filtering;
  64   NumberSeq _update_rs;
  65   NumberSeq _scan_rs;
  66   NumberSeq _obj_copy;
  67   NumberSeq _termination;
  68   NumberSeq _parallel_other;
  69   NumberSeq _clear_ct;
  70 
  71   void print_summary(const char* str, const NumberSeq* seq) const;
  72   void print_summary_sd(const char* str, const NumberSeq* seq) const;
  73 
  74 public:
  75    TraceYoungGenTimeData() : _young_pause_num(0), _mixed_pause_num(0) {};
  76   void record_start_collection(double time_to_stop_the_world_ms);
  77   void record_yield_time(double yield_time_ms);
  78   void record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times);
  79   void increment_young_collection_count();
  80   void increment_mixed_collection_count();
  81   void print() const;
  82 };
  83 
  84 class TraceOldGenTimeData : public CHeapObj<mtGC> {
  85  private:
     // Durations (ms) of all full GCs observed so far.
  86   NumberSeq _all_full_gc_times;
  87 
  88  public:
     // Record the duration of one completed full collection.
  89   void record_full_collection(double full_gc_time_ms);
     // Print a summary of the recorded full GC times.
  90   void print() const;
  91 };
  92 
  93 // There are three command line options related to the young gen size:
  94 // NewSize, MaxNewSize and NewRatio (There is also -Xmn, but that is
  95 // just a short form for NewSize==MaxNewSize). G1 will use its internal
  96 // heuristics to calculate the actual young gen size, so these options
  97 // basically only limit the range within which G1 can pick a young gen
  98 // size. Also, these are general options taking byte sizes. G1 will
  99 // internally work with a number of regions instead. So, some rounding
 100 // will occur.
 101 //
  102 // If nothing related to the young gen size is set on the command
 103 // line we should allow the young gen to be between G1NewSizePercent
 104 // and G1MaxNewSizePercent of the heap size. This means that every time
 105 // the heap size changes, the limits for the young gen size will be
 106 // recalculated.
 107 //
 108 // If only -XX:NewSize is set we should use the specified value as the
 109 // minimum size for young gen. Still using G1MaxNewSizePercent of the
 110 // heap as maximum.
 111 //
 112 // If only -XX:MaxNewSize is set we should use the specified value as the


 164     return _adaptive_size;
 165   }
 166 };
 167 
 168 class G1CollectorPolicy: public CollectorPolicy {
 169  private:
 170   G1IHOPControl* _ihop_control;
 171 
 172   G1IHOPControl* create_ihop_control() const;
 173   // Update the IHOP control with necessary statistics.
 174   void update_ihop_prediction(double mutator_time_s,
 175                               size_t mutator_alloc_bytes,
 176                               size_t young_gen_size);
 177   void report_ihop_statistics();
 178 
 179   G1Predictions _predictor;
 180 
 181   double get_new_prediction(TruncatedSeq const* seq) const;
 182   size_t get_new_size_prediction(TruncatedSeq const* seq) const;
 183 
 184   // either equal to the number of parallel threads, if ParallelGCThreads
 185   // has been set, or 1 otherwise
 186   int _parallel_gc_threads;
 187 
 188   // The number of GC threads currently active.
 189   uintx _no_of_gc_threads;
 190 
 191   G1MMUTracker* _mmu_tracker;
 192 
 193   void initialize_alignments();
 194   void initialize_flags();
 195 
 196   CollectionSetChooser* _cset_chooser;
 197 
 198   double _full_collection_start_sec;
 199 
 200   // These exclude marking times.
 201   TruncatedSeq* _recent_gc_times_ms;
 202 
 203   TruncatedSeq* _concurrent_mark_remark_times_ms;
 204   TruncatedSeq* _concurrent_mark_cleanup_times_ms;
 205 
 206   // Ratio check data for determining if heap growth is necessary.
 207   uint _ratio_over_threshold_count;
 208   double _ratio_over_threshold_sum;
 209   uint _pauses_since_start;
 210 
 211   TraceYoungGenTimeData _trace_young_gen_time_data;
 212   TraceOldGenTimeData   _trace_old_gen_time_data;
 213 
 214   double _stop_world_start;
 215 
 216   uint _young_list_target_length;
 217   uint _young_list_fixed_length;
 218 
 219   // The max number of regions we can extend the eden by while the GC
 220   // locker is active. This should be >= _young_list_target_length;
 221   uint _young_list_max_length;
 222 
 223   SurvRateGroup* _short_lived_surv_rate_group;
 224   SurvRateGroup* _survivor_surv_rate_group;
 225   // add here any more surv rate groups
 226 
 227   double _gc_overhead_perc;
 228 
 229   double _reserve_factor;
 230   uint   _reserve_regions;
 231 
 232   enum PredictionConstants {
 233     TruncatedSeqLength = 10,
 234     NumPrevPausesForHeuristics = 10,
 235     // MinOverThresholdForGrowth must be less than NumPrevPausesForHeuristics,


 269 
 270   uint eden_cset_region_length() const     { return _eden_cset_region_length;     }
 271   uint survivor_cset_region_length() const { return _survivor_cset_region_length; }
 272   uint old_cset_region_length() const      { return _old_cset_region_length;      }
 273 
 274   uint _free_regions_at_end_of_collection;
 275 
 276   size_t _recorded_rs_lengths;
 277   size_t _max_rs_lengths;
 278 
 279   size_t _rs_lengths_prediction;
 280 
 281 #ifndef PRODUCT
 282   bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
 283 #endif // PRODUCT
 284 
 285   void adjust_concurrent_refinement(double update_rs_time,
 286                                     double update_rs_processed_buffers,
 287                                     double goal_ms);
 288 
 289   uintx no_of_gc_threads() { return _no_of_gc_threads; }
 290   void set_no_of_gc_threads(uintx v) { _no_of_gc_threads = v; }
 291 
 292   double _pause_time_target_ms;
 293 
 294   size_t _pending_cards;
 295 
 296   // The amount of allocated bytes in old gen during the last mutator and the following
 297   // young GC phase.
 298   size_t _bytes_allocated_in_old_since_last_gc;
 299 
 300   G1InitialMarkToMixedTimeTracker _initial_mark_to_mixed;
 301 public:
 302   const G1Predictions& predictor() const { return _predictor; }
 303 
 304   // Add the given number of bytes to the total number of allocated bytes in the old gen.
 305   void add_bytes_allocated_in_old_since_last_gc(size_t bytes) { _bytes_allocated_in_old_since_last_gc += bytes; }
 306 
 307   // Accessors
 308 
 309   void set_region_eden(HeapRegion* hr, int young_index_in_cset) {
 310     hr->set_eden();
 311     hr->install_surv_rate_group(_short_lived_surv_rate_group);


 446   // the recorded info for the evacuation pause.
 447 
 448   enum CSetBuildType {
 449     Active,             // We are actively building the collection set
 450     Inactive            // We are not actively building the collection set
 451   };
 452 
 453   CSetBuildType _inc_cset_build_state;
 454 
 455   // The head of the incrementally built collection set.
 456   HeapRegion* _inc_cset_head;
 457 
 458   // The tail of the incrementally built collection set.
 459   HeapRegion* _inc_cset_tail;
 460 
 461   // The number of bytes in the incrementally built collection set.
 462   // Used to set _collection_set_bytes_used_before at the start of
 463   // an evacuation pause.
 464   size_t _inc_cset_bytes_used_before;
 465 
 466   // Used to record the highest end of heap region in collection set
 467   HeapWord* _inc_cset_max_finger;
 468 
 469   // The RSet lengths recorded for regions in the CSet. It is updated
 470   // by the thread that adds a new region to the CSet. We assume that
 471   // only one thread can be allocating a new CSet region (currently,
 472   // it does so after taking the Heap_lock) hence no need to
 473   // synchronize updates to this field.
 474   size_t _inc_cset_recorded_rs_lengths;
 475 
 476   // A concurrent refinement thread periodically samples the young
 477   // region RSets and needs to update _inc_cset_recorded_rs_lengths as
 478   // the RSets grow. Instead of having to synchronize updates to that
 479   // field we accumulate them in this field and add it to
 480   // _inc_cset_recorded_rs_lengths_diffs at the start of a GC.
 481   ssize_t _inc_cset_recorded_rs_lengths_diffs;
 482 
 483   // The predicted elapsed time it will take to collect the regions in
 484   // the CSet. This is updated by the thread that adds a new region to
 485   // the CSet. See the comment for _inc_cset_recorded_rs_lengths about
 486   // MT-safety assumptions.
 487   double _inc_cset_predicted_elapsed_time_ms;
 488 


 639   void record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc);
 640 
 641   // Record the start and end of a full collection.
 642   void record_full_collection_start();
 643   void record_full_collection_end();
 644 
 645   // Must currently be called while the world is stopped.
 646   void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);
 647 
 648   // Record start and end of remark.
 649   void record_concurrent_mark_remark_start();
 650   void record_concurrent_mark_remark_end();
 651 
 652   // Record start, end, and completion of cleanup.
 653   void record_concurrent_mark_cleanup_start();
 654   void record_concurrent_mark_cleanup_end();
 655   void record_concurrent_mark_cleanup_completed();
 656 
 657   virtual void print_phases();
 658 
 659   void record_stop_world_start();
 660   void record_concurrent_pause();
 661 
 662   // Record how much space we copied during a GC. This is typically
 663   // called when a GC alloc region is being retired.
 664   void record_bytes_copied_during_gc(size_t bytes) {
 665     _bytes_copied_during_gc += bytes;
 666   }
 667 
 668   // The amount of space we copied during a GC.
 669   size_t bytes_copied_during_gc() const {
 670     return _bytes_copied_during_gc;
 671   }
 672 
 673   size_t collection_set_bytes_used_before() const {
 674     return _collection_set_bytes_used_before;
 675   }
 676 
 677   // Determine whether there are candidate regions so that the
 678   // next GC should be mixed. The two action strings are used
 679   // in the ergo output when the method returns true or false.
 680   bool next_gc_should_be_mixed(const char* true_action_str,
 681                                const char* false_action_str) const;


 750   // This sets the initiate_conc_mark_if_possible() flag to start a
 751   // new cycle, as long as we are not already in one. It's best if it
 752   // is called during a safepoint when the test whether a cycle is in
 753   // progress or not is stable.
 754   bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);
 755 
 756   // This is called at the very beginning of an evacuation pause (it
 757   // has to be the first thing that the pause does). If
 758   // initiate_conc_mark_if_possible() is true, and the concurrent
 759   // marking thread has completed its work during the previous cycle,
  760 // it will set during_initial_mark_pause() to true so that the pause does
 761   // the initial-mark work and start a marking cycle.
 762   void decide_on_conc_mark_initiation();
 763 
 764   // If an expansion would be appropriate, because recent GC overhead had
 765   // exceeded the desired limit, return an amount to expand by.
 766   virtual size_t expansion_amount();
 767 
 768   // Clear ratio tracking data used by expansion_amount().
 769   void clear_ratio_check_data();
 770 
 771   // Print tracing information.
 772   void print_tracing_info() const;
 773 
 774   // Print stats on young survival ratio
 775   void print_yg_surv_rate_info() const;
 776 
 777   void finished_recalculating_age_indexes(bool is_survivors) {
 778     if (is_survivors) {
 779       _survivor_surv_rate_group->finished_recalculating_age_indexes();
 780     } else {
 781       _short_lived_surv_rate_group->finished_recalculating_age_indexes();
 782     }
 783     // do that for any other surv rate groups
 784   }
 785 
 786   size_t young_list_target_length() const { return _young_list_target_length; }
 787 
 788   bool is_young_list_full() const;
 789 
 790   bool can_expand_young_list() const;
 791 
 792   uint young_list_max_length() const {




  27 
  28 #include "gc/g1/collectionSetChooser.hpp"
  29 #include "gc/g1/g1CollectorState.hpp"
  30 #include "gc/g1/g1GCPhaseTimes.hpp"
  31 #include "gc/g1/g1InCSetState.hpp"
  32 #include "gc/g1/g1InitialMarkToMixedTimeTracker.hpp"
  33 #include "gc/g1/g1MMUTracker.hpp"
  34 #include "gc/g1/g1Predictions.hpp"
  35 #include "gc/shared/collectorPolicy.hpp"
  36 #include "utilities/pair.hpp"
  37 
  38 // A G1CollectorPolicy makes policy decisions that determine the
  39 // characteristics of the collector.  Examples include:
  40 //   * choice of collection set.
  41 //   * when to collect.
  42 
  43 class HeapRegion;
  44 class CollectionSetChooser;
  45 class G1IHOPControl;
  46 














































  47 // There are three command line options related to the young gen size:
  48 // NewSize, MaxNewSize and NewRatio (There is also -Xmn, but that is
  49 // just a short form for NewSize==MaxNewSize). G1 will use its internal
  50 // heuristics to calculate the actual young gen size, so these options
  51 // basically only limit the range within which G1 can pick a young gen
  52 // size. Also, these are general options taking byte sizes. G1 will
  53 // internally work with a number of regions instead. So, some rounding
  54 // will occur.
  55 //
   56 // If nothing related to the young gen size is set on the command
  57 // line we should allow the young gen to be between G1NewSizePercent
  58 // and G1MaxNewSizePercent of the heap size. This means that every time
  59 // the heap size changes, the limits for the young gen size will be
  60 // recalculated.
  61 //
  62 // If only -XX:NewSize is set we should use the specified value as the
  63 // minimum size for young gen. Still using G1MaxNewSizePercent of the
  64 // heap as maximum.
  65 //
  66 // If only -XX:MaxNewSize is set we should use the specified value as the


 118     return _adaptive_size;
 119   }
 120 };
 121 
 122 class G1CollectorPolicy: public CollectorPolicy {
 123  private:
 124   G1IHOPControl* _ihop_control;
 125 
 126   G1IHOPControl* create_ihop_control() const;
 127   // Update the IHOP control with necessary statistics.
 128   void update_ihop_prediction(double mutator_time_s,
 129                               size_t mutator_alloc_bytes,
 130                               size_t young_gen_size);
 131   void report_ihop_statistics();
 132 
 133   G1Predictions _predictor;
 134 
 135   double get_new_prediction(TruncatedSeq const* seq) const;
 136   size_t get_new_size_prediction(TruncatedSeq const* seq) const;
 137 







 138   G1MMUTracker* _mmu_tracker;
 139 
 140   void initialize_alignments();
 141   void initialize_flags();
 142 
 143   CollectionSetChooser* _cset_chooser;
 144 
 145   double _full_collection_start_sec;
 146 
 147   // These exclude marking times.
 148   TruncatedSeq* _recent_gc_times_ms;
 149 
 150   TruncatedSeq* _concurrent_mark_remark_times_ms;
 151   TruncatedSeq* _concurrent_mark_cleanup_times_ms;
 152 
 153   // Ratio check data for determining if heap growth is necessary.
 154   uint _ratio_over_threshold_count;
 155   double _ratio_over_threshold_sum;
 156   uint _pauses_since_start;
 157 





 158   uint _young_list_target_length;
 159   uint _young_list_fixed_length;
 160 
 161   // The max number of regions we can extend the eden by while the GC
 162   // locker is active. This should be >= _young_list_target_length;
 163   uint _young_list_max_length;
 164 
 165   SurvRateGroup* _short_lived_surv_rate_group;
 166   SurvRateGroup* _survivor_surv_rate_group;
 167   // add here any more surv rate groups
 168 
 169   double _gc_overhead_perc;
 170 
 171   double _reserve_factor;
 172   uint   _reserve_regions;
 173 
 174   enum PredictionConstants {
 175     TruncatedSeqLength = 10,
 176     NumPrevPausesForHeuristics = 10,
 177     // MinOverThresholdForGrowth must be less than NumPrevPausesForHeuristics,


 211 
 212   uint eden_cset_region_length() const     { return _eden_cset_region_length;     }
 213   uint survivor_cset_region_length() const { return _survivor_cset_region_length; }
 214   uint old_cset_region_length() const      { return _old_cset_region_length;      }
 215 
 216   uint _free_regions_at_end_of_collection;
 217 
 218   size_t _recorded_rs_lengths;
 219   size_t _max_rs_lengths;
 220 
 221   size_t _rs_lengths_prediction;
 222 
 223 #ifndef PRODUCT
 224   bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
 225 #endif // PRODUCT
 226 
 227   void adjust_concurrent_refinement(double update_rs_time,
 228                                     double update_rs_processed_buffers,
 229                                     double goal_ms);
 230 



 231   double _pause_time_target_ms;
 232 
 233   size_t _pending_cards;
 234 
 235   // The amount of allocated bytes in old gen during the last mutator and the following
 236   // young GC phase.
 237   size_t _bytes_allocated_in_old_since_last_gc;
 238 
 239   G1InitialMarkToMixedTimeTracker _initial_mark_to_mixed;
 240 public:
 241   const G1Predictions& predictor() const { return _predictor; }
 242 
 243   // Add the given number of bytes to the total number of allocated bytes in the old gen.
 244   void add_bytes_allocated_in_old_since_last_gc(size_t bytes) { _bytes_allocated_in_old_since_last_gc += bytes; }
 245 
 246   // Accessors
 247 
 248   void set_region_eden(HeapRegion* hr, int young_index_in_cset) {
 249     hr->set_eden();
 250     hr->install_surv_rate_group(_short_lived_surv_rate_group);


 385   // the recorded info for the evacuation pause.
 386 
 387   enum CSetBuildType {
 388     Active,             // We are actively building the collection set
 389     Inactive            // We are not actively building the collection set
 390   };
 391 
 392   CSetBuildType _inc_cset_build_state;
 393 
 394   // The head of the incrementally built collection set.
 395   HeapRegion* _inc_cset_head;
 396 
 397   // The tail of the incrementally built collection set.
 398   HeapRegion* _inc_cset_tail;
 399 
 400   // The number of bytes in the incrementally built collection set.
 401   // Used to set _collection_set_bytes_used_before at the start of
 402   // an evacuation pause.
 403   size_t _inc_cset_bytes_used_before;
 404 



 405   // The RSet lengths recorded for regions in the CSet. It is updated
 406   // by the thread that adds a new region to the CSet. We assume that
 407   // only one thread can be allocating a new CSet region (currently,
 408   // it does so after taking the Heap_lock) hence no need to
 409   // synchronize updates to this field.
 410   size_t _inc_cset_recorded_rs_lengths;
 411 
 412   // A concurrent refinement thread periodically samples the young
 413   // region RSets and needs to update _inc_cset_recorded_rs_lengths as
 414   // the RSets grow. Instead of having to synchronize updates to that
 415   // field we accumulate them in this field and add it to
 416   // _inc_cset_recorded_rs_lengths_diffs at the start of a GC.
 417   ssize_t _inc_cset_recorded_rs_lengths_diffs;
 418 
 419   // The predicted elapsed time it will take to collect the regions in
 420   // the CSet. This is updated by the thread that adds a new region to
 421   // the CSet. See the comment for _inc_cset_recorded_rs_lengths about
 422   // MT-safety assumptions.
 423   double _inc_cset_predicted_elapsed_time_ms;
 424 


 575   void record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc);
 576 
 577   // Record the start and end of a full collection.
 578   void record_full_collection_start();
 579   void record_full_collection_end();
 580 
 581   // Must currently be called while the world is stopped.
 582   void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);
 583 
 584   // Record start and end of remark.
 585   void record_concurrent_mark_remark_start();
 586   void record_concurrent_mark_remark_end();
 587 
 588   // Record start, end, and completion of cleanup.
 589   void record_concurrent_mark_cleanup_start();
 590   void record_concurrent_mark_cleanup_end();
 591   void record_concurrent_mark_cleanup_completed();
 592 
 593   virtual void print_phases();
 594 



 595   // Record how much space we copied during a GC. This is typically
 596   // called when a GC alloc region is being retired.
 597   void record_bytes_copied_during_gc(size_t bytes) {
 598     _bytes_copied_during_gc += bytes;
 599   }
 600 
 601   // The amount of space we copied during a GC.
 602   size_t bytes_copied_during_gc() const {
 603     return _bytes_copied_during_gc;
 604   }
 605 
 606   size_t collection_set_bytes_used_before() const {
 607     return _collection_set_bytes_used_before;
 608   }
 609 
 610   // Determine whether there are candidate regions so that the
 611   // next GC should be mixed. The two action strings are used
 612   // in the ergo output when the method returns true or false.
 613   bool next_gc_should_be_mixed(const char* true_action_str,
 614                                const char* false_action_str) const;


 683   // This sets the initiate_conc_mark_if_possible() flag to start a
 684   // new cycle, as long as we are not already in one. It's best if it
 685   // is called during a safepoint when the test whether a cycle is in
 686   // progress or not is stable.
 687   bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);
 688 
 689   // This is called at the very beginning of an evacuation pause (it
 690   // has to be the first thing that the pause does). If
 691   // initiate_conc_mark_if_possible() is true, and the concurrent
 692   // marking thread has completed its work during the previous cycle,
   693 // it will set during_initial_mark_pause() to true so that the pause does
 694   // the initial-mark work and start a marking cycle.
 695   void decide_on_conc_mark_initiation();
 696 
 697   // If an expansion would be appropriate, because recent GC overhead had
 698   // exceeded the desired limit, return an amount to expand by.
 699   virtual size_t expansion_amount();
 700 
 701   // Clear ratio tracking data used by expansion_amount().
 702   void clear_ratio_check_data();



 703 
 704   // Print stats on young survival ratio
 705   void print_yg_surv_rate_info() const;
 706 
 707   void finished_recalculating_age_indexes(bool is_survivors) {
 708     if (is_survivors) {
 709       _survivor_surv_rate_group->finished_recalculating_age_indexes();
 710     } else {
 711       _short_lived_surv_rate_group->finished_recalculating_age_indexes();
 712     }
 713     // do that for any other surv rate groups
 714   }
 715 
 716   size_t young_list_target_length() const { return _young_list_target_length; }
 717 
 718   bool is_young_list_full() const;
 719 
 720   bool can_expand_young_list() const;
 721 
 722   uint young_list_max_length() const {


< prev index next >