// Even though we don't use GC efficiency in our heuristics as much as
// we used to, we still order according to GC efficiency. This causes
// regions with a lot of live objects and large RSets to end up at the
// end of the array. Given that we might skip collecting the last few
// old regions if, after a few mixed GCs, their reclaimable bytes fall
// under a certain threshold, the hope is that the regions we skip are
// the ones with both large RSets and a lot of live objects, rather
// than the ones with just a lot of live objects, as would happen if
// we ordered by the amount of reclaimable bytes per region.
static int order_regions(HeapRegion* hr1, HeapRegion* hr2) {
  // NULL entries sort to the end of the array.
  if (hr1 == NULL) {
    if (hr2 == NULL) {
      return 0;
    } else {
      return 1;
    }
  } else if (hr2 == NULL) {
    return -1;
  }

  // Regions marked premature-old sort ahead of all other regions, so
  // they are picked for the collection set first.
  if (hr1->is_premature_old() && !hr2->is_premature_old()) {
    return -1;
  }

  if (hr2->is_premature_old() && !hr1->is_premature_old()) {
    return 1;
  }

  // Otherwise order by decreasing GC efficiency: regions that reclaim
  // the most per unit of collection time come first.
  double gc_eff1 = hr1->gc_efficiency();
  double gc_eff2 = hr2->gc_efficiency();
  if (gc_eff1 > gc_eff2) {
    return -1;
  } else if (gc_eff1 < gc_eff2) {
    return 1;
  } else {
    return 0;
  }
}
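// Illustrative ordering only (hypothetical values, not taken from this
// file): given regions A (premature-old, gc_efficiency 0.1),
// B (gc_efficiency 2.0) and C (gc_efficiency 0.5), the comparator
// above yields the order A, B, C -- the premature-old region wins
// regardless of efficiency, and the rest sort by decreasing
// efficiency.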

// Comparator overload taking pointers to array elements, matching the
// signature expected when sorting an array of HeapRegion*.
static int order_regions(HeapRegion** hr1p, HeapRegion** hr2p) {
  return order_regions(*hr1p, *hr2p);
}
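// A sketch of the intended use (assuming _regions is a
// GrowableArray<HeapRegion*>, as in stock G1): sort_regions() would
// hand the pointer-to-element overload to the array's sort routine,
// e.g. _regions.sort(order_regions).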

CollectionSetChooser::CollectionSetChooser() :
  // The line below is the worst bit of C++ hackery I've ever written
  // (Detlefs, 11/23). You should think of it as equivalent to
  // "_regions(100, true)": initialize the growable array and inform it
  // that it should allocate its elem array(s) on the C heap.
  void work(uint worker_id) {
    ParKnownGarbageHRClosure par_known_garbage_cl(_hrSorted, _chunk_size);
    // Each worker starts iterating at its own offset and claims heap
    // regions in chunks, applying the closure to every region claimed.
    _g1h->heap_region_par_iterate_from_worker_offset(&par_known_garbage_cl, &_hrclaimer, worker_id);
  }
};

uint CollectionSetChooser::calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const {
  assert(n_workers > 0, "Active gc workers should be greater than 0");
  const uint overpartition_factor = 4;
  const uint min_chunk_size = MAX2(n_regions / n_workers, 1U);
  return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size);
}
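// Worked example (hypothetical sizing): with n_regions = 2048 and
// n_workers = 8, min_chunk_size = MAX2(2048 / 8, 1) = 256 and the
// overpartitioned size is 2048 / (8 * 4) = 64, so the result is
// MAX2(64, 256) = 256. Because min_chunk_size is already
// n_regions / n_workers, it dominates the overpartitioned term
// whenever n_regions >= n_workers.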

bool CollectionSetChooser::region_occupancy_low_enough_for_evac(size_t live_bytes) {
  return live_bytes < mixed_gc_live_threshold_bytes();
}
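// For reference (stock G1 behavior; verify against this build):
// mixed_gc_live_threshold_bytes() is derived from
// G1MixedGCLiveThresholdPercent of the region size, so e.g. at the
// default of 85% with 1 MB regions, only regions with fewer than
// roughly 870 KB of live data qualify for evacuation.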

bool CollectionSetChooser::should_add(HeapRegion* hr) const {
  // A region is a candidate for the collection set if it is an old
  // region that is not pinned, its remembered set is complete, and it
  // is either premature-old or sparsely occupied enough to evacuate.
  return !hr->is_young() &&
         !hr->is_pinned() &&
         (hr->is_premature_old() || region_occupancy_low_enough_for_evac(hr->live_bytes())) &&
         hr->rem_set()->is_complete();
}

void CollectionSetChooser::rebuild(WorkGang* workers, uint n_regions) {
  clear();

  uint n_workers = workers->active_workers();

  uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions);
  prepare_for_par_region_addition(n_workers, n_regions, chunk_size);

  // Scan all regions in parallel to collect candidates, then sort them
  // with order_regions() so the best candidates come first.
  ParKnownGarbageTask par_known_garbage_task(this, chunk_size, n_workers);
  workers->run_task(&par_known_garbage_task);

  sort_regions();
}
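// Note: rebuild() relies on up-to-date live_bytes() and complete
// remembered sets, so callers would typically invoke it only after a
// marking cycle has finished (an assumption about the call sites,
// which are not shown here).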