15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_G1_G1COLLECTIONSET_HPP
26 #define SHARE_VM_GC_G1_G1COLLECTIONSET_HPP
27
28 #include "gc/g1/collectionSetChooser.hpp"
29 #include "utilities/debug.hpp"
30 #include "utilities/globalDefinitions.hpp"
31
32 class G1CollectedHeap;
33 class G1CollectorState;
34 class G1GCPhaseTimes;
35 class G1Policy;
36 class G1SurvivorRegions;
37 class HeapRegion;
38
// G1CollectionSet tracks the set of heap regions (the "collection set", or
// CSet) chosen for evacuation in the next pause: eden and survivor regions
// are added incrementally between pauses, and old regions are added during
// finalize_old_part() for mixed collections.
39 class G1CollectionSet {
// The heap and policy objects this collection set belongs to; both are
// passed in at construction and are not owned by this class.
40 G1CollectedHeap* _g1h;
41 G1Policy* _policy;
42
// Chooser for old-region CSet candidates; see collectionSetChooser.hpp.
43 CollectionSetChooser* _cset_chooser;
44
// Number of regions currently in the CSet, broken down by category.
// region_length()/young_region_length() below are sums of these.
45 uint _eden_region_length;
46 uint _survivor_region_length;
47 uint _old_region_length;
48
49 // The actual collection set as a set of region indices.
50 // All entries in _collection_set_regions below _collection_set_cur_length are
51 // assumed to be valid entries.
52 // We assume that at any time there is at most only one writer and (one or more)
53 // concurrent readers. This means we are good with using storestore and loadload
54 // barriers on the writer and reader respectively only.
55 uint* _collection_set_regions;
56 volatile size_t _collection_set_cur_length;
57 size_t _collection_set_max_length;
58
59 // The number of bytes in the collection set before the pause. Set from
60 // the incrementally built collection set at the start of an evacuation
61 // pause, and incremented in finalize_old_part() when adding old regions
62 // (if any) to the collection set.
63 size_t _bytes_used_before;
64
// Total recorded remembered-set lengths for the regions in the CSet;
// set via set_recorded_rs_lengths(), read via recorded_rs_lengths().
65 size_t _recorded_rs_lengths;
66
67 // The associated information that is maintained while the incremental
68 // collection set is being built with young regions. Used to populate
69 // the recorded info for the evacuation pause.
70
// Whether the incremental CSet is currently accepting new young regions;
// toggled by start_incremental_building()/stop_incremental_building().
71 enum CSetBuildType {
72 Active, // We are actively building the collection set
73 Inactive // We are not actively building the collection set
74 };
75
76 CSetBuildType _inc_build_state;
77
78 // The number of bytes in the incrementally built collection set.
// NOTE(review): the embedded source numbering jumps here (78 -> 89), so the
// member declarations this comment block belongs to (e.g. the _inc_* byte
// and RSet-length counters referenced by the comments below, such as
// _inc_recorded_rs_lengths) appear to have been elided from this view —
// confirm against the upstream file before editing.
89
90 // A concurrent refinement thread periodically samples the young
91 // region RSets and needs to update _inc_recorded_rs_lengths as
92 // the RSets grow. Instead of having to synchronize updates to that
93 // field we accumulate them in this field and add it to
94 // _inc_recorded_rs_lengths_diffs at the start of a GC.
95 ssize_t _inc_recorded_rs_lengths_diffs;
96
97 // The predicted elapsed time it will take to collect the regions in
98 // the CSet. This is updated by the thread that adds a new region to
99 // the CSet. See the comment for _inc_recorded_rs_lengths about
100 // MT-safety assumptions.
101 double _inc_predicted_elapsed_time_ms;
102
103 // See the comment for _inc_recorded_rs_lengths_diffs.
104 double _inc_predicted_elapsed_time_ms_diffs;
105
// Internal accessors implemented in the .cpp file (presumably reaching
// through _g1h/_policy — confirm in the implementation).
106 G1CollectorState* collector_state();
107 G1GCPhaseTimes* phase_times();
108
// Predicts how long collecting the given region will take; used when
// deciding what fits in the pause time budget (implemented in the .cpp).
109 double predict_region_elapsed_time_ms(HeapRegion* hr);
110
// Debug-only consistency check of the young region indices in the CSet;
// compiles to an empty body in product builds (NOT_DEBUG_RETURN).
111 void verify_young_cset_indices() const NOT_DEBUG_RETURN;
112 public:
113 G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy);
114 ~G1CollectionSet();
115
116 // Initializes the collection set giving the maximum possible length of the collection set.
117 void initialize(uint max_region_length);
118
// Accessor for the old-region candidate chooser.
119 CollectionSetChooser* cset_chooser();
120
// Records the eden/survivor region counts for the current CSet.
121 void init_region_lengths(uint eden_cset_region_length,
122 uint survivor_cset_region_length);
123
124 void set_recorded_rs_lengths(size_t rs_lengths);
125
// Total number of regions in the CSet (young + old).
126 uint region_length() const { return young_region_length() +
127 old_region_length(); }
128 uint young_region_length() const { return eden_region_length() +
129 survivor_region_length(); }
130
131 uint eden_region_length() const { return _eden_region_length; }
132 uint survivor_region_length() const { return _survivor_region_length; }
133 uint old_region_length() const { return _old_region_length; }
134
135 // Incremental collection set support
136
137 // Initialize incremental collection set info.
138 void start_incremental_building();
139
140 // Perform any final calculations on the incremental collection set fields
141 // before we can use them.
142 void finalize_incremental_building();
143
144 // Reset the contents of the collection set.
145 void clear();
146
147 // Iterate over the collection set, applying the given HeapRegionClosure on all of them.
148 // If may_be_aborted is true, iteration may be aborted using the return value of the
149 // called closure method.
150 void iterate(HeapRegionClosure* cl) const;
151
152 // Iterate over the collection set, applying the given HeapRegionClosure on all of them,
153 // trying to optimally spread out starting position of total_workers workers given the
// NOTE(review): the embedded source numbering jumps here (153 -> 158); the
// declaration this comment describes (an iterate_from-style method taking a
// worker id/count) seems to have been elided from this view — confirm
// against the upstream file.
// Stop accepting new regions into the incremental CSet.
158 void stop_incremental_building() { _inc_build_state = Inactive; }
159
160 size_t recorded_rs_lengths() { return _recorded_rs_lengths; }
161
162 size_t bytes_used_before() const {
163 return _bytes_used_before;
164 }
165
166 void reset_bytes_used_before() {
167 _bytes_used_before = 0;
168 }
169
170 // Choose a new collection set. Marks the chosen regions as being
171 // "in_collection_set".
172 double finalize_young_part(double target_pause_time_ms, G1SurvivorRegions* survivors);
173 void finalize_old_part(double time_remaining_ms);
174
175 // Add old region "hr" to the collection set.
176 void add_old_region(HeapRegion* hr);
177
178 // Update information about hr in the aggregated information for
179 // the incrementally built collection set.
180 void update_young_region_prediction(HeapRegion* hr, size_t new_rs_length);
181
182 // Add eden region to the collection set.
183 void add_eden_region(HeapRegion* hr);
184
185 // Add survivor region to the collection set.
186 void add_survivor_regions(HeapRegion* hr);
187
// Verification/printing helpers compiled only into non-product builds.
188 #ifndef PRODUCT
189 bool verify_young_ages();
190
191 void print(outputStream* st);
192 #endif // !PRODUCT
193
194 private:
195 // Update the incremental collection set information when adding a region.
196 void add_young_region_common(HeapRegion* hr);
197 };
198
199 #endif // SHARE_VM_GC_G1_G1COLLECTIONSET_HPP
200
// ---- End of first revision of this header; an updated revision of the same file follows below. ----
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_G1_G1COLLECTIONSET_HPP
26 #define SHARE_VM_GC_G1_G1COLLECTIONSET_HPP
27
28 #include "gc/g1/collectionSetChooser.hpp"
29 #include "utilities/debug.hpp"
30 #include "utilities/globalDefinitions.hpp"
31
32 class G1CollectedHeap;
33 class G1CollectorState;
34 class G1GCPhaseTimes;
35 class G1ParScanThreadStateSet;
36 class G1Policy;
37 class G1SurvivorRegions;
38 class HeapRegion;
39
// G1CollectionSet tracks the set of heap regions (the "collection set", or
// CSet) chosen for evacuation in the next pause: eden and survivor regions
// are added incrementally between pauses, old regions are added during
// finalize_old_part() for mixed collections, and additional old regions may
// be kept aside as "optional" regions to be evacuated only if time permits.
40 class G1CollectionSet {
// The heap and policy objects this collection set belongs to; both are
// passed in at construction and are not owned by this class.
41 G1CollectedHeap* _g1h;
42 G1Policy* _policy;
43
// Chooser for old-region CSet candidates; see collectionSetChooser.hpp.
44 CollectionSetChooser* _cset_chooser;
45
// Number of regions currently in the CSet, broken down by category.
// region_length()/young_region_length() below are sums of these.
46 uint _eden_region_length;
47 uint _survivor_region_length;
48 uint _old_region_length;
49
50 // The actual collection set as a set of region indices.
51 // All entries in _collection_set_regions below _collection_set_cur_length are
52 // assumed to be valid entries.
53 // We assume that at any time there is at most only one writer and (one or more)
54 // concurrent readers. This means we are good with using storestore and loadload
55 // barriers on the writer and reader respectively only.
56 uint* _collection_set_regions;
57 volatile size_t _collection_set_cur_length;
58 size_t _collection_set_max_length;
59
60 // When doing mixed collections we can add old regions to the collection, which
61 // can be collected if there is enough time. We call these optional regions and
62 // the pointer to these regions are stored in the array below.
63 HeapRegion** _optional_regions;
64 uint _optional_region_length;
65 uint _optional_region_max_length;
66
67 // The number of bytes in the collection set before the pause. Set from
68 // the incrementally built collection set at the start of an evacuation
69 // pause, and incremented in finalize_old_part() when adding old regions
70 // (if any) to the collection set.
71 size_t _bytes_used_before;
72
// Total recorded remembered-set lengths for the regions in the CSet;
// set via set_recorded_rs_lengths(), read via recorded_rs_lengths().
73 size_t _recorded_rs_lengths;
74
75 // The associated information that is maintained while the incremental
76 // collection set is being built with young regions. Used to populate
77 // the recorded info for the evacuation pause.
78
// Whether the incremental CSet is currently accepting new young regions;
// toggled by start_incremental_building()/stop_incremental_building().
79 enum CSetBuildType {
80 Active, // We are actively building the collection set
81 Inactive // We are not actively building the collection set
82 };
83
84 CSetBuildType _inc_build_state;
85
86 // The number of bytes in the incrementally built collection set.
// NOTE(review): the embedded source numbering jumps here (86 -> 97), so the
// member declarations this comment block belongs to (e.g. the _inc_* byte
// and RSet-length counters referenced by the comments below, such as
// _inc_recorded_rs_lengths) appear to have been elided from this view —
// confirm against the upstream file before editing.
97
98 // A concurrent refinement thread periodically samples the young
99 // region RSets and needs to update _inc_recorded_rs_lengths as
100 // the RSets grow. Instead of having to synchronize updates to that
101 // field we accumulate them in this field and add it to
102 // _inc_recorded_rs_lengths_diffs at the start of a GC.
103 ssize_t _inc_recorded_rs_lengths_diffs;
104
105 // The predicted elapsed time it will take to collect the regions in
106 // the CSet. This is updated by the thread that adds a new region to
107 // the CSet. See the comment for _inc_recorded_rs_lengths about
108 // MT-safety assumptions.
109 double _inc_predicted_elapsed_time_ms;
110
111 // See the comment for _inc_recorded_rs_lengths_diffs.
112 double _inc_predicted_elapsed_time_ms_diffs;
113
// Internal accessors implemented in the .cpp file (presumably reaching
// through _g1h/_policy — confirm in the implementation).
114 G1CollectorState* collector_state();
115 G1GCPhaseTimes* phase_times();
116
// Debug-only consistency check of the young region indices in the CSet;
// compiles to an empty body in product builds (NOT_DEBUG_RETURN).
117 void verify_young_cset_indices() const NOT_DEBUG_RETURN;
// Private helpers, presumably used when routing an old region into either
// the required or the optional part of the CSet — see the .cpp file.
118 void add_as_optional(HeapRegion* hr);
119 void add_as_old(HeapRegion* hr);
120 bool optional_is_full();
121
122 public:
123 G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy);
124 ~G1CollectionSet();
125
126 // Initializes the collection set giving the maximum possible length of the collection set.
127 void initialize(uint max_region_length);
// Set up / tear down the _optional_regions storage.
128 void initialize_optional(uint max_length);
129 void free_optional_regions();
130
// Accessor for the old-region candidate chooser.
131 CollectionSetChooser* cset_chooser();
132
// Records the eden/survivor region counts for the current CSet.
133 void init_region_lengths(uint eden_cset_region_length,
134 uint survivor_cset_region_length);
135
136 void set_recorded_rs_lengths(size_t rs_lengths);
137
// Total number of regions in the CSet (young + old; optional regions are
// tracked separately via optional_region_length()).
138 uint region_length() const { return young_region_length() +
139 old_region_length(); }
140 uint young_region_length() const { return eden_region_length() +
141 survivor_region_length(); }
142
143 uint eden_region_length() const { return _eden_region_length; }
144 uint survivor_region_length() const { return _survivor_region_length; }
145 uint old_region_length() const { return _old_region_length; }
146 uint optional_region_length() const { return _optional_region_length; }
147
148 // Incremental collection set support
149
150 // Initialize incremental collection set info.
151 void start_incremental_building();
152
153 // Perform any final calculations on the incremental collection set fields
154 // before we can use them.
155 void finalize_incremental_building();
156
157 // Reset the contents of the collection set.
158 void clear();
159
160 // Iterate over the collection set, applying the given HeapRegionClosure on all of them.
161 // If may_be_aborted is true, iteration may be aborted using the return value of the
162 // called closure method.
163 void iterate(HeapRegionClosure* cl) const;
164
165 // Iterate over the collection set, applying the given HeapRegionClosure on all of them,
166 // trying to optimally spread out starting position of total_workers workers given the
// NOTE(review): the embedded source numbering jumps here (166 -> 171); the
// declaration this comment describes (an iterate_from-style method taking a
// worker id/count) seems to have been elided from this view — confirm
// against the upstream file.
// Stop accepting new regions into the incremental CSet.
171 void stop_incremental_building() { _inc_build_state = Inactive; }
172
173 size_t recorded_rs_lengths() { return _recorded_rs_lengths; }
174
175 size_t bytes_used_before() const {
176 return _bytes_used_before;
177 }
178
179 void reset_bytes_used_before() {
180 _bytes_used_before = 0;
181 }
182
183 // Choose a new collection set. Marks the chosen regions as being
184 // "in_collection_set".
185 double finalize_young_part(double target_pause_time_ms, G1SurvivorRegions* survivors);
186 void finalize_old_part(double time_remaining_ms);
187
188 // Add old region "hr" to the collection set.
189 void add_old_region(HeapRegion* hr);
190
191 // Add old region "hr" to optional collection set.
192 void add_optional_region(HeapRegion* hr);
193
194 // Update information about hr in the aggregated information for
195 // the incrementally built collection set.
196 void update_young_region_prediction(HeapRegion* hr, size_t new_rs_length);
197
198 // Add eden region to the collection set.
199 void add_eden_region(HeapRegion* hr);
200
201 // Add survivor region to the collection set.
202 void add_survivor_regions(HeapRegion* hr);
203
// Verification/printing helpers compiled only into non-product builds.
204 #ifndef PRODUCT
205 bool verify_young_ages();
206
207 void print(outputStream* st);
208 #endif // !PRODUCT
209
// Predicts how long collecting the given region will take; used when
// deciding what fits in the pause time budget (implemented in the .cpp).
210 double predict_region_elapsed_time_ms(HeapRegion* hr);
211
// Clears hr's slot in the optional region array — presumably used when an
// optional region is promoted into the real CSet; confirm in the .cpp.
212 void clear_optional_region(const HeapRegion* hr);
213
// Returns the optional region at index i. Bounds and initialization are
// checked by assert(), i.e. in debug builds only.
214 HeapRegion* optional_region_at(uint i) const {
215 assert(_optional_regions != NULL, "Not yet initialized");
216 assert(i < _optional_region_length, "index %u out of bounds (%u)", i, _optional_region_length);
217 return _optional_regions[i];
218 }
219
// Removes and returns the last optional region, NULLing out its slot and
// shrinking _optional_region_length by one.
220 HeapRegion* remove_last_optional_region() {
221 assert(_optional_regions != NULL, "Not yet initialized");
222 assert(_optional_region_length != 0, "No region to remove");
223 _optional_region_length--;
224 HeapRegion* removed = _optional_regions[_optional_region_length];
225 _optional_regions[_optional_region_length] = NULL;
226 return removed;
227 }
228
229 private:
230 // Update the incremental collection set information when adding a region.
231 void add_young_region_common(HeapRegion* hr);
232 };
233
234 // Helper class to manage the optional regions in a Mixed collection.
// Stack-allocated (StackObj) helper that drives evacuation of the optional
// regions held by a G1CollectionSet during a mixed collection, in rounds
// bounded by the remaining pause time.
235 class G1OptionalCSet : public StackObj {
236 private:
// The collection set whose optional regions are being evacuated, and the
// per-thread scan state set used for the evacuation; both are passed in at
// construction and not owned here.
237 G1CollectionSet* _cset;
238 G1ParScanThreadStateSet* _pset;
// Appears to delimit the half-open window [_current_index, _current_limit)
// of optional regions prepared for the current evacuation round — confirm
// against prepare_evacuation() in the .cpp.
239 uint _current_index;
240 uint _current_limit;
// Failure flags, exposed via prepare_failed()/evacuation_failed() below.
241 bool _prepare_failed;
242 bool _evacuation_failed;
243
// Per-region preparation step used by prepare_evacuation() — see the .cpp.
244 void prepare_to_evacuate_optional_region(HeapRegion* hr);
245
246 public:
// Sentinel (UINT_MAX) denoting "no valid collection set index".
247 static const uint InvalidCSetIndex = UINT_MAX;
248
// Starts with an empty window ([0, 0)) and no recorded failures.
249 G1OptionalCSet(G1CollectionSet* cset, G1ParScanThreadStateSet* pset) :
250 _cset(cset),
251 _pset(pset),
252 _current_index(0),
253 _current_limit(0),
254 _prepare_failed(false),
255 _evacuation_failed(false) { }
256 // The destructor returns regions to the cset-chooser and
257 // frees the optional structure in the cset.
258 ~G1OptionalCSet();
259
260 uint current_index() { return _current_index; }
261 uint current_limit() { return _current_limit; }
262
// Number of optional regions / whether there are none; see the .cpp.
263 uint size();
264 bool is_empty();
265
// Accessor for the optional region at the given index; see the .cpp.
266 HeapRegion* region_at(uint index);
267
268 // Prepare a set of regions for optional evacuation.
269 void prepare_evacuation(double time_left_ms);
270 bool prepare_failed();
271
272 // Complete the evacuation of the previously prepared
273 // regions by updating their state and check for failures.
274 void complete_evacuation();
275 bool evacuation_failed();
276 };
277
278 #endif // SHARE_VM_GC_G1_G1COLLECTIONSET_HPP
279
// ---- End of second revision of this header. ----