6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/g1/collectionSetChooser.hpp"
27 #include "gc/g1/g1CollectedHeap.inline.hpp"
28 #include "gc/g1/heapRegionRemSet.hpp"
29 #include "gc/shared/space.inline.hpp"
30 #include "runtime/atomic.hpp"
31 #include "utilities/quickSort.hpp"
32
33 // Order regions according to GC efficiency. This will cause regions with a lot
34 // of live objects and large remembered sets to end up at the end of the array.
35 // Given that we might skip collecting the last few old regions, if after a few
36 // mixed GCs the remaining have reclaimable bytes under a certain threshold, the
37 // hope is that the ones we'll skip are ones with both large remembered sets and
38 // a lot of live objects, not the ones with just a lot of live objects if we
39 // ordered according to the amount of reclaimable bytes per region.
40 static int order_regions(HeapRegion* hr1, HeapRegion* hr2) {
41 // Make sure that NULL entries are moved to the end.
42 if (hr1 == NULL) {
43 if (hr2 == NULL) {
44 return 0;
45 } else {
46 return 1;
47 }
153 uint _cur_chunk_end;
154
155 uint _regions_added;
156 size_t _reclaimable_bytes_added;
157
  // Record a candidate region into this closure's current chunk of the shared
  // candidate array, claiming a fresh chunk when the current one is exhausted,
  // and accumulate per-closure totals for later merging into the task totals.
  void add_region(HeapRegion* hr) {
    if (_cur_chunk_idx == _cur_chunk_end) {
      // Chunk exhausted (or none claimed yet): claim the next chunk from the
      // shared array; both index and end are updated by reference.
      // NOTE(review): _array is shared across parallel workers, so claim_chunk
      // is assumed to synchronize the claim — confirm in G1BuildCandidateArray.
      _array->claim_chunk(_cur_chunk_idx, _cur_chunk_end);
    }
    assert(_cur_chunk_idx < _cur_chunk_end, "Must be");

    // Cache the region's GC efficiency now; the candidate array is later
    // sorted using it.
    hr->calc_gc_efficiency();
    _array->set(_cur_chunk_idx, hr);

    _cur_chunk_idx++;

    _regions_added++;
    _reclaimable_bytes_added += hr->reclaimable_bytes();
  }
172
173 bool should_add(HeapRegion* hr) { return CollectionSetChooser::should_add(hr); }
174
175 public:
  // Start with an empty (zero-length) current chunk so that the first call to
  // add_region() claims an initial chunk from the shared array.
  G1BuildCandidateRegionsClosure(G1BuildCandidateArray* array) :
    _array(array),
    _cur_chunk_idx(0),
    _cur_chunk_end(0),
    _regions_added(0),
    _reclaimable_bytes_added(0) { }
182
183 bool do_heap_region(HeapRegion* r) {
184 // We will skip any region that's currently used as an old GC
185 // alloc region (we should not consider those for collection
186 // before we fill them up).
187 if (should_add(r) && !G1CollectedHeap::heap()->is_old_gc_alloc_region(r)) {
188 add_region(r);
189 } else if (r->is_old()) {
190 // Keep remembered sets for humongous regions, otherwise clean out remembered
191 // sets for old regions.
192 r->rem_set()->clear(true /* only_cardset */);
193 } else {
227 _hrclaimer(num_workers),
228 _num_regions_added(0),
229 _reclaimable_bytes_added(0),
230 _result(max_num_regions, chunk_size, num_workers) { }
231
232 void work(uint worker_id) {
233 G1BuildCandidateRegionsClosure cl(&_result);
234 _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
235 update_totals(cl.regions_added(), cl.reclaimable_bytes_added());
236 }
237
238 G1CollectionSetCandidates* get_sorted_candidates() {
239 HeapRegion** regions = NEW_C_HEAP_ARRAY(HeapRegion*, _num_regions_added, mtGC);
240 _result.sort_and_copy_into(regions, _num_regions_added);
241 return new G1CollectionSetCandidates(regions,
242 _num_regions_added,
243 _reclaimable_bytes_added);
244 }
245 };
246
247 uint CollectionSetChooser::calculate_work_chunk_size(uint num_workers, uint num_regions) {
248 assert(num_workers > 0, "Active gc workers should be greater than 0");
249 return MAX2(num_regions / num_workers, 1U);
250 }
251
252 bool CollectionSetChooser::should_add(HeapRegion* hr) {
253 return !hr->is_young() &&
254 !hr->is_pinned() &&
255 region_occupancy_low_enough_for_evac(hr->live_bytes()) &&
256 hr->rem_set()->is_complete();
257 }
258
259 G1CollectionSetCandidates* CollectionSetChooser::build(WorkGang* workers, uint max_num_regions) {
260 uint num_workers = workers->active_workers();
261 uint chunk_size = calculate_work_chunk_size(num_workers, max_num_regions);
262
263 G1BuildCandidateRegionsTask cl(max_num_regions, chunk_size, num_workers);
264 workers->run_task(&cl, num_workers);
265
266 G1CollectionSetCandidates* result = cl.get_sorted_candidates();
267 result->verify();
268 return result;
269 }
|
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/g1/g1CollectedHeap.inline.hpp"
27 #include "gc/g1/g1CollectionSetChooser.hpp"
28 #include "gc/g1/heapRegionRemSet.hpp"
29 #include "gc/shared/space.inline.hpp"
30 #include "runtime/atomic.hpp"
31 #include "utilities/quickSort.hpp"
32
33 // Order regions according to GC efficiency. This will cause regions with a lot
34 // of live objects and large remembered sets to end up at the end of the array.
35 // Given that we might skip collecting the last few old regions, if after a few
36 // mixed GCs the remaining have reclaimable bytes under a certain threshold, the
37 // hope is that the ones we'll skip are ones with both large remembered sets and
38 // a lot of live objects, not the ones with just a lot of live objects if we
39 // ordered according to the amount of reclaimable bytes per region.
40 static int order_regions(HeapRegion* hr1, HeapRegion* hr2) {
41 // Make sure that NULL entries are moved to the end.
42 if (hr1 == NULL) {
43 if (hr2 == NULL) {
44 return 0;
45 } else {
46 return 1;
47 }
153 uint _cur_chunk_end;
154
155 uint _regions_added;
156 size_t _reclaimable_bytes_added;
157
  // Record a candidate region into this closure's current chunk of the shared
  // candidate array, claiming a fresh chunk when the current one is exhausted,
  // and accumulate per-closure totals for later merging into the task totals.
  void add_region(HeapRegion* hr) {
    if (_cur_chunk_idx == _cur_chunk_end) {
      // Chunk exhausted (or none claimed yet): claim the next chunk from the
      // shared array; both index and end are updated by reference.
      // NOTE(review): _array is shared across parallel workers, so claim_chunk
      // is assumed to synchronize the claim — confirm in G1BuildCandidateArray.
      _array->claim_chunk(_cur_chunk_idx, _cur_chunk_end);
    }
    assert(_cur_chunk_idx < _cur_chunk_end, "Must be");

    // Cache the region's GC efficiency now; the candidate array is later
    // sorted using it.
    hr->calc_gc_efficiency();
    _array->set(_cur_chunk_idx, hr);

    _cur_chunk_idx++;

    _regions_added++;
    _reclaimable_bytes_added += hr->reclaimable_bytes();
  }
172
173 bool should_add(HeapRegion* hr) { return G1CollectionSetChooser::should_add(hr); }
174
175 public:
  // Start with an empty (zero-length) current chunk so that the first call to
  // add_region() claims an initial chunk from the shared array.
  G1BuildCandidateRegionsClosure(G1BuildCandidateArray* array) :
    _array(array),
    _cur_chunk_idx(0),
    _cur_chunk_end(0),
    _regions_added(0),
    _reclaimable_bytes_added(0) { }
182
183 bool do_heap_region(HeapRegion* r) {
184 // We will skip any region that's currently used as an old GC
185 // alloc region (we should not consider those for collection
186 // before we fill them up).
187 if (should_add(r) && !G1CollectedHeap::heap()->is_old_gc_alloc_region(r)) {
188 add_region(r);
189 } else if (r->is_old()) {
190 // Keep remembered sets for humongous regions, otherwise clean out remembered
191 // sets for old regions.
192 r->rem_set()->clear(true /* only_cardset */);
193 } else {
227 _hrclaimer(num_workers),
228 _num_regions_added(0),
229 _reclaimable_bytes_added(0),
230 _result(max_num_regions, chunk_size, num_workers) { }
231
232 void work(uint worker_id) {
233 G1BuildCandidateRegionsClosure cl(&_result);
234 _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
235 update_totals(cl.regions_added(), cl.reclaimable_bytes_added());
236 }
237
238 G1CollectionSetCandidates* get_sorted_candidates() {
239 HeapRegion** regions = NEW_C_HEAP_ARRAY(HeapRegion*, _num_regions_added, mtGC);
240 _result.sort_and_copy_into(regions, _num_regions_added);
241 return new G1CollectionSetCandidates(regions,
242 _num_regions_added,
243 _reclaimable_bytes_added);
244 }
245 };
246
247 uint G1CollectionSetChooser::calculate_work_chunk_size(uint num_workers, uint num_regions) {
248 assert(num_workers > 0, "Active gc workers should be greater than 0");
249 return MAX2(num_regions / num_workers, 1U);
250 }
251
252 bool G1CollectionSetChooser::should_add(HeapRegion* hr) {
253 return !hr->is_young() &&
254 !hr->is_pinned() &&
255 region_occupancy_low_enough_for_evac(hr->live_bytes()) &&
256 hr->rem_set()->is_complete();
257 }
258
259 G1CollectionSetCandidates* G1CollectionSetChooser::build(WorkGang* workers, uint max_num_regions) {
260 uint num_workers = workers->active_workers();
261 uint chunk_size = calculate_work_chunk_size(num_workers, max_num_regions);
262
263 G1BuildCandidateRegionsTask cl(max_num_regions, chunk_size, num_workers);
264 workers->run_task(&cl, num_workers);
265
266 G1CollectionSetCandidates* result = cl.get_sorted_candidates();
267 result->verify();
268 return result;
269 }
|