
src/hotspot/share/gc/g1/g1CollectionSet.hpp

rev 54087 : imported patch 8218668-reorganize-collection-set


  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_GC_G1_G1COLLECTIONSET_HPP
  26 #define SHARE_GC_G1_G1COLLECTIONSET_HPP
  27 
  28 #include "utilities/debug.hpp"
  29 #include "utilities/globalDefinitions.hpp"
  30 
  31 class G1CollectedHeap;
  32 class G1CollectionSetCandidates;
  33 class G1CollectorState;
  34 class G1GCPhaseTimes;
  35 class G1ParScanThreadStateSet;
  36 class G1Policy;
  37 class G1SurvivorRegions;
  38 class HeapRegion;
  39 class HeapRegionClosure;
  40 
  41 class G1CollectionSet {
  42   G1CollectedHeap* _g1h;
  43   G1Policy* _policy;
  44 
  45   // All old gen collection set candidate regions for the current mixed gc phase.
  46   G1CollectionSetCandidates* _candidates;
  47 
  48   uint _eden_region_length;
  49   uint _survivor_region_length;
  50   uint _old_region_length;
  51 
  52   // The actual collection set as a set of region indices.
  53   // All entries in _collection_set_regions below _collection_set_cur_length are
  54   // assumed to be valid entries.
  55   // We assume that at any time there is at most only one writer and (one or more)
  56   // concurrent readers. This means we are good with using storestore and loadload
  57   // barriers on the writer and reader respectively only.
  58   uint* _collection_set_regions;
  59   volatile size_t _collection_set_cur_length;
  60   size_t _collection_set_max_length;
  61 
  62   // When doing mixed collections we can add old regions to the collection set, which
  63   // can be collected if there is enough time. We call these optional regions and
  64   // the pointers to these regions are stored in the array below.
  65   HeapRegion** _optional_regions;
  66   uint _optional_region_length;
  67   uint _optional_region_max_length;
  68 
  69   // The number of bytes in the collection set before the pause. Set from
  70   // the incrementally built collection set at the start of an evacuation
  71   // pause, and incremented in finalize_old_part() when adding old regions
  72   // (if any) to the collection set.
  73   size_t _bytes_used_before;
  74 



  75   size_t _recorded_rs_lengths;
  76 
  77   // The associated information that is maintained while the incremental
  78   // collection set is being built with young regions. Used to populate
  79   // the recorded info for the evacuation pause.
  80 
  81   enum CSetBuildType {
  82     Active,             // We are actively building the collection set
  83     Inactive            // We are not actively building the collection set
  84   };
  85 
  86   CSetBuildType _inc_build_state;
  87 
  88   // The number of bytes in the incrementally built collection set.
  89   // Used to set _collection_set_bytes_used_before at the start of
  90   // an evacuation pause.
  91   size_t _inc_bytes_used_before;
  92 
  93   // The RSet lengths recorded for regions in the CSet. It is updated
  94   // by the thread that adds a new region to the CSet. We assume that
  95   // only one thread can be allocating a new CSet region (currently,
  96   // it does so after taking the Heap_lock) hence no need to
  97   // synchronize updates to this field.
  98   size_t _inc_recorded_rs_lengths;
  99 
 100   // A concurrent refinement thread periodically samples the young
 101   // region RSets and needs to update _inc_recorded_rs_lengths as
 102   // the RSets grow. Instead of having to synchronize updates to that
 103   // field we accumulate them in this field and add it to
 104   // _inc_recorded_rs_lengths at the start of a GC.
 105   ssize_t _inc_recorded_rs_lengths_diffs;
 106 
 107   // The predicted elapsed time it will take to collect the regions in
 108   // the CSet. This is updated by the thread that adds a new region to
 109   // the CSet. See the comment for _inc_recorded_rs_lengths about
 110   // MT-safety assumptions.
 111   double _inc_predicted_elapsed_time_ms;
 112 
 113   // See the comment for _inc_recorded_rs_lengths_diffs.
 114   double _inc_predicted_elapsed_time_ms_diffs;
 115 
 116   G1CollectorState* collector_state();
 117   G1GCPhaseTimes* phase_times();
 118 
 119   void verify_young_cset_indices() const NOT_DEBUG_RETURN;
 120   void add_as_optional(HeapRegion* hr);
 121   void add_as_old(HeapRegion* hr);
 122   bool optional_is_full();
 123 





















 124 public:
 125   G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy);
 126   ~G1CollectionSet();
 127 
 128   // Initializes the collection set given the maximum possible length of the collection set.
 129   void initialize(uint max_region_length);
 130   void initialize_optional(uint max_length);
 131   void free_optional_regions();
 132 
 133   void clear_candidates();
 134 
 135   void set_candidates(G1CollectionSetCandidates* candidates) {
 136     assert(_candidates == NULL, "Trying to replace collection set candidates.");
 137     _candidates = candidates;
 138   }
 139   G1CollectionSetCandidates* candidates() { return _candidates; }
 140 
 141   void init_region_lengths(uint eden_cset_region_length,
 142                            uint survivor_cset_region_length);
 143 
 144   void set_recorded_rs_lengths(size_t rs_lengths);
 145 
 146   uint region_length() const       { return young_region_length() +
 147                                             old_region_length(); }
 148   uint young_region_length() const { return eden_region_length() +
 149                                             survivor_region_length(); }
 150 
 151   uint eden_region_length() const     { return _eden_region_length;     }
 152   uint survivor_region_length() const { return _survivor_region_length; }
 153   uint old_region_length() const      { return _old_region_length;      }
 154   uint optional_region_length() const { return _optional_region_length; }



 155 
 156   // Incremental collection set support
 157 
 158   // Initialize incremental collection set info.
 159   void start_incremental_building();
 160 
 161   // Perform any final calculations on the incremental collection set fields
 162   // before we can use them.
 163   void finalize_incremental_building();
 164 
 165   // Reset the contents of the collection set.
 166   void clear();
 167 
 168   // Iterate over the collection set, applying the given HeapRegionClosure on all of
 169   // them. Iteration may be aborted using the return value of the called closure
 170   // method.
 171   void iterate(HeapRegionClosure* cl) const;
 172 
 173   // Iterate over the collection set, applying the given HeapRegionClosure on all of them,
 174   // trying to optimally spread out the starting positions of the total_workers workers
 175   // given the caller's worker_id.
 176   void iterate_from(HeapRegionClosure* cl, uint worker_id, uint total_workers) const;
 177 
 178   // Stop adding regions to the incremental collection set.
 179   void stop_incremental_building() { _inc_build_state = Inactive; }
 180 
 181   size_t recorded_rs_lengths() { return _recorded_rs_lengths; }
 182 
 183   size_t bytes_used_before() const {
 184     return _bytes_used_before;
 185   }
 186 
 187   void reset_bytes_used_before() {
 188     _bytes_used_before = 0;
 189   }
 190 
 191   // Chooses a new collection set and marks the chosen regions as being
 192   // "in_collection_set".
 193   double finalize_young_part(double target_pause_time_ms, G1SurvivorRegions* survivors);
 194   void finalize_old_part(double time_remaining_ms);
 195 
 196   // Add old region "hr" to the collection set.
 197   void add_old_region(HeapRegion* hr);
 198 
 199   // Add old region "hr" to the optional collection set.
 200   void add_optional_region(HeapRegion* hr);
 201 
 202   // Update information about hr in the aggregated information for
 203   // the incrementally built collection set.
 204   void update_young_region_prediction(HeapRegion* hr, size_t new_rs_length);
 205 
 206   // Add eden region to the collection set.
 207   void add_eden_region(HeapRegion* hr);
 208 
 209   // Add survivor region to the collection set.
 210   void add_survivor_regions(HeapRegion* hr);
 211 
 212 #ifndef PRODUCT
 213   bool verify_young_ages();
 214 
 215   void print(outputStream* st);
 216 #endif // !PRODUCT
 217 
 218   double predict_region_elapsed_time_ms(HeapRegion* hr);
 219 
 220   void clear_optional_region(const HeapRegion* hr);
 221 
 222   HeapRegion* optional_region_at(uint i) const {
 223     assert(_optional_regions != NULL, "Not yet initialized");
 224     assert(i < _optional_region_length, "index %u out of bounds (%u)", i, _optional_region_length);
 225     return _optional_regions[i];
 226   }
 227 
 228   HeapRegion* remove_last_optional_region() {
 229     assert(_optional_regions != NULL, "Not yet initialized");
 230     assert(_optional_region_length != 0, "No region to remove");
 231     _optional_region_length--;
 232     HeapRegion* removed = _optional_regions[_optional_region_length];
 233     _optional_regions[_optional_region_length] = NULL;
 234     return removed;
 235   }
 236 
 237 private:
 238   // Update the incremental collection set information when adding a region.
 239   void add_young_region_common(HeapRegion* hr);
 240 };
 241 
 242 // Helper class to manage the optional regions in a Mixed collection.
 243 class G1OptionalCSet : public StackObj {
 244 private:
 245   G1CollectionSet* _cset;
 246   G1ParScanThreadStateSet* _pset;
 247   uint _current_index;
 248   uint _current_limit;
 249   bool _prepare_failed;
 250   bool _evacuation_failed;
 251 
 252   void prepare_to_evacuate_optional_region(HeapRegion* hr);
 253 
 254 public:
 255   static const uint InvalidCSetIndex = UINT_MAX;
 256 
 257   G1OptionalCSet(G1CollectionSet* cset, G1ParScanThreadStateSet* pset) :
 258     _cset(cset),
 259     _pset(pset),
 260     _current_index(0),
 261     _current_limit(0),
 262     _prepare_failed(false),
 263     _evacuation_failed(false) { }
 264   // The destructor returns regions to the collection set candidates set and
 265   // frees the optional structure in the collection set.
 266   ~G1OptionalCSet();
 267 
 268   uint current_index() { return _current_index; }
 269   uint current_limit() { return _current_limit; }
 270 
 271   uint size();
 272   bool is_empty();
 273 
 274   HeapRegion* region_at(uint index);
 275 
 276   // Prepare a set of regions for optional evacuation.
 277   void prepare_evacuation(double time_left_ms);
 278   bool prepare_failed();
 279 
 280   // Complete the evacuation of the previously prepared regions by
 281   // updating their state and checking for failures.
 282   void complete_evacuation();
 283   bool evacuation_failed();
 284 };
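The prepare/complete pair above implies a small driver loop in the caller. A minimal sketch of such a loop, assuming hypothetical caller-side helpers (collection_set(), per_thread_states, time_left_ms(), evacuate_optional_regions()) that stand in for whatever the evacuation code actually uses:

  {
    // StackObj: lives only for the duration of the optional evacuation phase.
    G1OptionalCSet optional_cset(collection_set(), per_thread_states);
    while (!optional_cset.is_empty()) {
      optional_cset.prepare_evacuation(time_left_ms());
      if (optional_cset.prepare_failed()) {
        break;                                   // nothing fit into the remaining time
      }
      evacuate_optional_regions(&optional_cset); // caller's own evacuation step
      optional_cset.complete_evacuation();
      if (optional_cset.evacuation_failed()) {
        break;                                   // stop selecting further regions
      }
    }
  } // destructor returns unused regions to the candidates and frees the optional structure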
 285 
 286 #endif // SHARE_GC_G1_G1COLLECTIONSET_HPP


  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_GC_G1_G1COLLECTIONSET_HPP
  26 #define SHARE_GC_G1_G1COLLECTIONSET_HPP
  27 
  28 #include "utilities/debug.hpp"
  29 #include "utilities/globalDefinitions.hpp"
  30 
  31 class G1CollectedHeap;
  32 class G1CollectionSetCandidates;
  33 class G1CollectorState;
  34 class G1GCPhaseTimes;
  35 class G1ParScanThreadStateSet;
  36 class G1Policy;
  37 class G1SurvivorRegions;
  38 class HeapRegion;
  39 class HeapRegionClosure;
  40 
  41 // The collection set.
  42 //
  43 // The collection set is built incrementally: it starts off with the set of
  44 // survivor regions, and during mutator time G1 adds eden regions to it as they are retired.
  45 //
  46 // For non-mixed collections this is all the collection set consists of and its
  47 // regions are evacuated in one pass.
  48 //
  49 // For mixed collections we not only determine a few old gen regions for an initial
  50 // collection set, but also a set of optional collection set regions from the
  51 // collection set candidates.
  52 //
  53 // After evacuating the initial collection set, G1 incrementally selects more
  54 // regions from the optional collection set regions as the time prediction permits.
  55 //
  56 // Support for incremental building is implemented by keeping an index into the
  57 // collection set; during an evacuation phase only the part from that index to
  58 // the end is evacuated.
  59 //
  60 // This results in a single complete collection set, spanning all evacuation phases,
  61 // that is available for cleanup afterwards.
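A condensed sketch of how that index is used across one garbage collection; the calls are members declared in the class below, while the exact call sites and ordering are only illustrative:

  start_incremental_building();        // building state becomes Active
  // ... eden regions are appended while the mutator runs ...
  update_incremental_marker();         // remember where the current increment starts
  // ... finalize_* appends the regions belonging to this increment ...
  iterate_incremental_part_from(cl, worker_id, num_workers);
  //     visits only the regions added since the last update_incremental_marker()
  stop_incremental_building();         // building state becomes Inactive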
  62 class G1CollectionSet {
  63   G1CollectedHeap* _g1h;
  64   G1Policy* _policy;
  65 
  66   // All old gen collection set candidate regions for the current mixed phase.
  67   G1CollectionSetCandidates* _candidates;
  68 
  69   uint _eden_region_length;
  70   uint _survivor_region_length;
  71   uint _old_region_length;
  72 
  73   // The actual collection set as a set of region indices.
  74   // All entries in _collection_set_regions below _collection_set_cur_length are
  75   // assumed to be part of the collection set.
  76   // We assume that at any time there is at most only one writer and (one or more)
  77   // concurrent readers. This means we are good with using storestore and loadload
  78   // barriers on the writer and reader respectively only.
  79   uint* _collection_set_regions;
  80   volatile size_t _collection_set_cur_length;
  81   size_t _collection_set_max_length;
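A minimal sketch of the single-writer/multiple-reader protocol described in the comment above, using HotSpot's OrderAccess barriers; append_region(), read_regions() and process() are hypothetical helpers, only the two members come from the declarations:

  // Writer: make the entry visible before publishing the new length.
  void append_region(uint hr_index) {
    _collection_set_regions[_collection_set_cur_length] = hr_index;
    OrderAccess::storestore();                 // order the entry before the length
    _collection_set_cur_length += 1;
  }

  // Reader: read the length first, then only entries below it.
  void read_regions() {
    size_t len = _collection_set_cur_length;
    OrderAccess::loadload();                   // order the length before the entries
    for (size_t i = 0; i < len; i++) {
      process(_collection_set_regions[i]);     // hypothetical per-region work
    }
  }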
  82 
  83   // When doing mixed collections we can add old regions to the collection set, which
  84   // will be collected only if there is enough time. We call these optional regions.
  85   // This member records the current number of such regions, which always
  86   // correspond to the first entries in the collection set candidates.
  87   uint _num_optional_regions;

  88 
  89   // The number of bytes in the collection set before the pause. Set from
  90   // the incrementally built collection set at the start of an evacuation
  91   // pause, and updated as more regions are added to the collection set.

  92   size_t _bytes_used_before;
  93 
  94   // The number of cards in the remembered sets of the regions in the collection set. Set from
  95   // the incrementally built collection set at the start of an evacuation
  96   // pause, and updated as more regions are added to the collection set.
  97   size_t _recorded_rs_lengths;
  98 
  99   enum CSetBuildType {
 100     Active,             // We are actively building the collection set
 101     Inactive            // We are not actively building the collection set
 102   };
 103 
 104   CSetBuildType _inc_build_state;
 105   size_t _inc_part_start;
 106 
 107   // The associated information that is maintained while the incremental
 108   // collection set is being built with *young* regions. Used to populate
 109   // the recorded info for the evacuation pause.
 110 
 111   // The number of bytes in the incrementally built collection set.
 112   // Used to set _collection_set_bytes_used_before at the start of
 113   // an evacuation pause.
 114   size_t _inc_bytes_used_before;
 115 
 116   // The RSet lengths recorded for regions in the CSet. It is updated
 117   // by the thread that adds a new region to the CSet. We assume that
 118   // only one thread can be allocating a new CSet region (currently,
 119   // it does so after taking the Heap_lock) hence no need to
 120   // synchronize updates to this field.
 121   size_t _inc_recorded_rs_lengths;
 122 
 123   // A concurrent refinement thread periodically samples the young
 124   // region RSets and needs to update _inc_recorded_rs_lengths as
 125   // the RSets grow. Instead of having to synchronize updates to that
 126   // field we accumulate them in this field and add it to
 127   // _inc_recorded_rs_lengths at the start of a GC.
 128   ssize_t _inc_recorded_rs_lengths_diffs;
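A sketch of the sampling/flush pattern this describes; the two helper functions are hypothetical, only the two fields come from the declarations above, and real code would additionally guard against the sum going negative:

  // Refinement thread: record only the delta of the re-sampled RSet length.
  void on_young_region_resampled(size_t old_rs_length, size_t new_rs_length) {
    _inc_recorded_rs_lengths_diffs += (ssize_t)new_rs_length - (ssize_t)old_rs_length;
  }

  // At the start of a GC: fold the accumulated diffs into the recorded value.
  void flush_rs_length_diffs() {
    _inc_recorded_rs_lengths += _inc_recorded_rs_lengths_diffs;
    _inc_recorded_rs_lengths_diffs = 0;
  }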
 129 
 130   // The predicted elapsed time it will take to collect the regions in
 131   // the CSet. This is updated by the thread that adds a new region to
 132   // the CSet. See the comment for _inc_recorded_rs_lengths about
 133   // MT-safety assumptions.
 134   double _inc_predicted_elapsed_time_ms;
 135 
 136   // See the comment for _inc_recorded_rs_lengths_diffs.
 137   double _inc_predicted_elapsed_time_ms_diffs;
 138 
 139   G1CollectorState* collector_state();
 140   G1GCPhaseTimes* phase_times();
 141 
 142   void verify_young_cset_indices() const NOT_DEBUG_RETURN;



 143 
 144   double predict_region_elapsed_time_ms(HeapRegion* hr);
 145 
 146   // Update the incremental collection set information when adding a region.
 147   void add_young_region_common(HeapRegion* hr);
 148 
 149   // Add old region "hr" to the collection set.
 150   void add_old_region(HeapRegion* hr);
 151 
 152   // Add old region "hr" to the optional collection set.
 153   void add_optional_region(HeapRegion* hr);
 154 
 155   void move_candidates_to_collection_set(uint num_regions);
 156 
 157   // Chooses a new collection set and marks the chosen regions as being
 158   // "in_collection_set".
 159   double finalize_young_part(double target_pause_time_ms, G1SurvivorRegions* survivors);
 160   // Perform any final calculations on the incremental collection set fields
 161   // before we can use them.
 162   void finalize_young_increment();
 163 
 164   void finalize_old_part(double time_remaining_ms);
 165 public:
 166   G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy);
 167   ~G1CollectionSet();
 168 
 169   // Initializes the collection set given the maximum possible length of the collection set.
 170   void initialize(uint max_region_length);
 171   void initialize_optional(uint max_length);
 172   void free_optional_regions();
 173 
 174   void clear_candidates();
 175 
 176   void set_candidates(G1CollectionSetCandidates* candidates) {
 177     assert(_candidates == NULL, "Trying to replace collection set candidates.");
 178     _candidates = candidates;
 179   }
 180   G1CollectionSetCandidates* candidates() { return _candidates; }
 181 
 182   void init_region_lengths(uint eden_cset_region_length,
 183                            uint survivor_cset_region_length);
 184 
 185   void set_recorded_rs_lengths(size_t rs_lengths);
 186 
 187   uint region_length() const       { return young_region_length() +
 188                                             old_region_length(); }
 189   uint young_region_length() const { return eden_region_length() +
 190                                             survivor_region_length(); }
 191 
 192   uint eden_region_length() const     { return _eden_region_length;     }
 193   uint survivor_region_length() const { return _survivor_region_length; }
 194   uint old_region_length() const      { return _old_region_length;      }
 195   uint optional_region_length() const { return _num_optional_regions; }
 196 
 197   // Reset the contents of the collection set.
 198   void clear();
 199 
 200   // Incremental collection set support
 201 
 202   // Initialize incremental collection set info.
 203   void start_incremental_building();
 204   // Start a new collection set increment.
 205   void update_incremental_marker() { _inc_build_state = Active; _inc_part_start = _collection_set_cur_length; }
 206   // Stop adding regions to the current collection set increment.
 207   void stop_incremental_building() { _inc_build_state = Inactive; }
 208 
 209   // Iterate over the current collection set increment applying the given HeapRegionClosure
 210   // from a starting position determined by the given worker id.
 211   void iterate_incremental_part_from(HeapRegionClosure* cl, uint worker_id, uint total_workers) const;
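One way the starting positions can be spread, shown as an illustrative body for this method (do_heap_region() and region_at() exist in HotSpot; the chunking formula itself is only a plausible sketch, not necessarily the exact one used):

  size_t len = _collection_set_cur_length - _inc_part_start;
  size_t cur = _inc_part_start + (len * worker_id / total_workers);
  for (size_t i = 0; i < len; i++) {
    if (cur == _collection_set_cur_length) {
      cur = _inc_part_start;                     // wrap around within the increment
    }
    HeapRegion* hr = _g1h->region_at(_collection_set_regions[cur]);
    if (cl->do_heap_region(hr)) {
      break;                                     // the closure requested an abort
    }
    cur++;
  }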



 212 
 213   // Iterate over the entire collection set (all increments calculated so far), applying
 214   // the given HeapRegionClosure on all of them.

 215   void iterate(HeapRegionClosure* cl) const;
 216 
 217   void iterate_optional(HeapRegionClosure* cl) const;
 218 
 219   size_t recorded_rs_lengths() { return _recorded_rs_lengths; }
 220 
 221   size_t bytes_used_before() const {
 222     return _bytes_used_before;
 223   }
 224 
 225   void reset_bytes_used_before() {
 226     _bytes_used_before = 0;
 227   }
 228 
 229   // Finalize the initial (first) collection set consisting of all young regions and a
 230   // few old gen regions.
 231   void finalize_initial_collection_set(double target_pause_time_ms, G1SurvivorRegions* survivor);
 232   // Finalize the next collection set from the set of available optional old gen regions.
 233   bool finalize_optional_for_evacuation(double remaining_pause_time);
 234   // Abandon (clean up) optional collection set regions that were not evacuated in this
 235   // pause.
 236   void abandon_optional_collection_set(G1ParScanThreadStateSet* pss);
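Taken together, a caller could drive the optional part of a pause roughly as follows; evacuate_collection_set(), remaining_time_ms(), target_pause_time_ms, survivors and pss are hypothetical stand-ins for the caller's own code, and the assumption that finalize_optional_for_evacuation() returns true when it selected regions is not guaranteed by this header:

  collection_set()->finalize_initial_collection_set(target_pause_time_ms, survivors);
  evacuate_collection_set();                      // young regions plus the initial old regions

  // Keep selecting optional increments while the predictions say there is time left.
  while (collection_set()->optional_region_length() > 0 &&
         collection_set()->finalize_optional_for_evacuation(remaining_time_ms())) {
    evacuate_collection_set();                    // evacuate the newly selected increment
  }

  // Optional regions that were never selected go back to the candidates.
  collection_set()->abandon_optional_collection_set(pss);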


 237 
 238   // Update information about hr in the aggregated information for
 239   // the incrementally built collection set.
 240   void update_young_region_prediction(HeapRegion* hr, size_t new_rs_length);
 241 
 242   // Add eden region to the collection set.
 243   void add_eden_region(HeapRegion* hr);
 244 
 245   // Add survivor region to the collection set.
 246   void add_survivor_regions(HeapRegion* hr);
 247 
 248 #ifndef PRODUCT
 249   bool verify_young_ages();
 250 
 251   void print(outputStream* st);
 252 #endif // !PRODUCT
 253 };
 254 
 255 #endif // SHARE_GC_G1_G1COLLECTIONSET_HPP