/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/collectionSetChooser.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/space.inline.hpp"
#include "runtime/atomic.hpp"

// Even though we don't use the GC efficiency in our heuristics as
// much as we used to, we still order according to GC efficiency. This
// will cause regions with a lot of live objects and large RSets to
// end up at the end of the array. Given that we might skip collecting
// the last few old regions if, after a few mixed GCs, the remaining
// ones have reclaimable bytes under a certain threshold, the hope is
// that the regions we skip are the ones with both large RSets and a
// lot of live objects, rather than (as would be the case if we ordered
// by reclaimable bytes per region) the ones with just a lot of live
// objects.
static int order_regions(HeapRegion* hr1, HeapRegion* hr2) {
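  // NULL entries are ordered after all non-NULL entries, so any unused
  // slots that were reserved for parallel region addition end up at the
  // tail of the array after sorting.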
  if (hr1 == NULL) {
    if (hr2 == NULL) {
      return 0;
    } else {
      return 1;
    }
  } else if (hr2 == NULL) {
    return -1;
  }

  double gc_eff1 = hr1->gc_efficiency();
  double gc_eff2 = hr2->gc_efficiency();
  if (gc_eff1 > gc_eff2) {
    return -1;
  } else if (gc_eff1 < gc_eff2) {
    return 1;
  } else {
    return 0;
  }
}

static int order_regions(HeapRegion** hr1p, HeapRegion** hr2p) {
  return order_regions(*hr1p, *hr2p);
}

CollectionSetChooser::CollectionSetChooser() :
  // The line below is the worst bit of C++ hackery I've ever written
  // (Detlefs, 11/23). You should think of it as equivalent to
  // "_regions(100, true)": initialize the growable array and inform it
  // that it should allocate its elem array(s) on the C heap.
  //
  // The first argument, however, is actually a comma expression
  // (set_allocation_type(this, C_HEAP), 100). The purpose of the
  // set_allocation_type() call is to replace the default allocation
  // type for embedded objects STACK_OR_EMBEDDED with C_HEAP. This
  // allows the assert in GenericGrowableArray() to pass; it checks
  // that a growable array object must itself be on the C heap if its
  // elements are.
  //
  // Note: the containing object is allocated on the C heap since it is a CHeapObj.
  //
  _regions((ResourceObj::set_allocation_type((address) &_regions,
                                             ResourceObj::C_HEAP),
            100), true /* C_Heap */),
  _front(0), _end(0), _first_par_unreserved_idx(0),
  _region_live_threshold_bytes(0), _remaining_reclaimable_bytes(0) {
  _region_live_threshold_bytes = mixed_gc_live_threshold_bytes();
}

#ifndef PRODUCT
void CollectionSetChooser::verify() {
  guarantee(_end <= regions_length(), "_end: %u regions length: %u", _end, regions_length());
  guarantee(_front <= _end, "_front: %u _end: %u", _front, _end);
  uint index = 0;
  size_t sum_of_reclaimable_bytes = 0;
  while (index < _front) {
    guarantee(regions_at(index) == NULL,
              "all entries before _front should be NULL");
    index += 1;
  }
  HeapRegion *prev = NULL;
  while (index < _end) {
    HeapRegion *curr = regions_at(index++);
    guarantee(curr != NULL, "Regions in _regions array cannot be NULL");
    guarantee(!curr->is_young(), "should not be young!");
    guarantee(!curr->is_pinned(),
              "Pinned region should not be in collection set (index %u)", curr->hrm_index());
    if (prev != NULL) {
      guarantee(order_regions(prev, curr) != 1,
                "GC eff prev: %1.4f GC eff curr: %1.4f",
                prev->gc_efficiency(), curr->gc_efficiency());
    }
    sum_of_reclaimable_bytes += curr->reclaimable_bytes();
    prev = curr;
  }
  guarantee(sum_of_reclaimable_bytes == _remaining_reclaimable_bytes,
            "reclaimable bytes inconsistent, "
            "remaining: " SIZE_FORMAT " sum: " SIZE_FORMAT,
            _remaining_reclaimable_bytes, sum_of_reclaimable_bytes);
}
#endif // !PRODUCT

void CollectionSetChooser::sort_regions() {
  // First trim any unused portion of the top in the parallel case.
  if (_first_par_unreserved_idx > 0) {
    assert(_first_par_unreserved_idx <= regions_length(),
           "Or we didn't reserve enough length");
    regions_trunc_to(_first_par_unreserved_idx);
  }
  _regions.sort(order_regions);
  assert(_end <= regions_length(), "Requirement");
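  // order_regions() sorts NULL entries after all real regions, so once the
  // array is sorted every index below _end refers to a non-NULL region;
  // the debug-only loop below checks exactly that.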
#ifdef ASSERT
  for (uint i = 0; i < _end; i++) {
    assert(regions_at(i) != NULL, "Should be true by sorting!");
  }
#endif // ASSERT
  if (log_is_enabled(Trace, gc, liveness)) {
    G1PrintRegionLivenessInfoClosure cl("Post-Sorting");
    for (uint i = 0; i < _end; ++i) {
      HeapRegion* r = regions_at(i);
      cl.do_heap_region(r);
    }
  }
  verify();
}

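// Appends a single candidate region (serial path) and caches its GC
// efficiency so that sort_regions() can order the candidates by it later.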
void CollectionSetChooser::add_region(HeapRegion* hr) {
  assert(!hr->is_pinned(),
         "Pinned region shouldn't be added to the collection set (index %u)", hr->hrm_index());
  assert(hr->is_old(), "should be old but is %s", hr->get_type_str());
  assert(hr->rem_set()->is_complete(),
         "Trying to add region %u to the collection set with incomplete remembered set", hr->hrm_index());
  _regions.append(hr);
  _end++;
  _remaining_reclaimable_bytes += hr->reclaimable_bytes();
  hr->calc_gc_efficiency();
}

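// Puts a region back at the front of the candidate array and adds its
// reclaimable bytes back to the remaining total.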
void CollectionSetChooser::push(HeapRegion* hr) {
  assert(hr != NULL, "Can't put back a NULL region");
  assert(_front >= 1, "Too many regions have been put back");
  _front--;
  regions_at_put(_front, hr);
  _remaining_reclaimable_bytes += hr->reclaimable_bytes();
}

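// Grows the candidate array with NULL placeholders so that worker threads
// can claim fixed-size chunks of it. Since every worker claims whole chunks,
// in the worst case each of the n_threads workers leaves one chunk almost
// unused, which is what the max_waste slack below accounts for.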
void CollectionSetChooser::prepare_for_par_region_addition(uint n_threads,
                                                           uint n_regions,
                                                           uint chunk_size) {
  _first_par_unreserved_idx = 0;
  uint max_waste = n_threads * chunk_size;
  // Round the number of regions up to a multiple of chunk_size.
  uint aligned_n_regions = (n_regions + chunk_size - 1) / chunk_size * chunk_size;
  assert(aligned_n_regions % chunk_size == 0, "should be aligned");
  regions_at_put_grow(aligned_n_regions + max_waste - 1, NULL);
}

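// Atomically claims the next chunk of chunk_size slots in the candidate
// array. Atomic::add() returns the updated value of _first_par_unreserved_idx,
// so the first index of the claimed chunk is that value minus chunk_size.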
uint CollectionSetChooser::claim_array_chunk(uint chunk_size) {
  uint res = (uint) Atomic::add((jint) chunk_size,
                                (volatile jint*) &_first_par_unreserved_idx);
  assert(regions_length() > res + chunk_size - 1,
         "Should already have been expanded");
  return res - chunk_size;
}

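// Fills one slot of a previously claimed chunk; the slot must still hold
// the NULL placeholder put there by prepare_for_par_region_addition().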
void CollectionSetChooser::set_region(uint index, HeapRegion* hr) {
  assert(regions_at(index) == NULL, "precondition");
  assert(hr->is_old(), "should be old but is %s", hr->get_type_str());
  regions_at_put(index, hr);
  hr->calc_gc_efficiency();
}

void CollectionSetChooser::update_totals(uint region_num,
                                         size_t reclaimable_bytes) {
  // Only take the lock if we actually need to update the totals.
  if (region_num > 0) {
    assert(reclaimable_bytes > 0, "invariant");
    // We could have just used atomics instead of taking the
    // lock. However, we currently don't have an atomic add for size_t.
    MutexLocker x(ParGCRareEvent_lock);
    _end += region_num;
    _remaining_reclaimable_bytes += reclaimable_bytes;
  } else {
    assert(reclaimable_bytes == 0, "invariant");
  }
}

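// Applies cl to the remaining candidate regions in order; if the closure
// returns true for a region, the iteration stops early and is marked
// incomplete.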
void CollectionSetChooser::iterate(HeapRegionClosure* cl) {
  for (uint i = _front; i < _end; i++) {
    HeapRegion* r = regions_at(i);
    if (cl->do_heap_region(r)) {
      cl->set_incomplete();
      break;
    }
  }
}

void CollectionSetChooser::clear() {
  _regions.clear();
  _front = 0;
  _end = 0;
  _remaining_reclaimable_bytes = 0;
}

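// Closure run by each worker during rebuild(): adds suitable old regions
// to this worker's chunk of the candidate array via CSetChooserParUpdater,
// and clears the remembered-set card sets of old regions that are not added.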
class ParKnownGarbageHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  CSetChooserParUpdater _cset_updater;

public:
  ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
                           uint chunk_size) :
    _g1h(G1CollectedHeap::heap()),
    _cset_updater(hrSorted, true /* parallel */, chunk_size) { }

  bool do_heap_region(HeapRegion* r) {
    // We will skip any region that's currently used as an old GC
    // alloc region (we should not consider those for collection
    // before we fill them up).
    if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
      _cset_updater.add_region(r);
    } else if (r->is_old()) {
      // Keep remembered sets for humongous regions, otherwise clean out remembered
      // sets for old regions.
      r->rem_set()->clear(true /* only_cardset */);
    } else {
      assert(r->is_archive() || !r->is_old() || !r->rem_set()->is_tracked(),
             "Failed to clear unused remembered set of region %u (%s) that is %s",
             r->hrm_index(), r->get_type_str(), r->rem_set()->get_state_str());
    }
    return false;
  }
};

class ParKnownGarbageTask: public AbstractGangTask {
  CollectionSetChooser* _hrSorted;
  uint _chunk_size;
  G1CollectedHeap* _g1h;
  HeapRegionClaimer _hrclaimer;

public:
  ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size, uint n_workers) :
    AbstractGangTask("ParKnownGarbageTask"),
    _hrSorted(hrSorted), _chunk_size(chunk_size),
    _g1h(G1CollectedHeap::heap()), _hrclaimer(n_workers) {}

  void work(uint worker_id) {
    ParKnownGarbageHRClosure par_known_garbage_cl(_hrSorted, _chunk_size);
    _g1h->heap_region_par_iterate_from_worker_offset(&par_known_garbage_cl, &_hrclaimer, worker_id);
  }
};

uint CollectionSetChooser::calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const {
  assert(n_workers > 0, "Active gc workers should be greater than 0");
  const uint overpartition_factor = 4;
  const uint min_chunk_size = MAX2(n_regions / n_workers, 1U);
  return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size);
}

bool CollectionSetChooser::region_occupancy_low_enough_for_evac(size_t live_bytes) {
  return live_bytes < mixed_gc_live_threshold_bytes();
}

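// A region is a mixed GC candidate only if it is neither young nor pinned,
// its live data is below the mixed GC liveness threshold, and its
// remembered set is complete.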
bool CollectionSetChooser::should_add(HeapRegion* hr) const {
  return !hr->is_young() &&
         !hr->is_pinned() &&
         region_occupancy_low_enough_for_evac(hr->live_bytes()) &&
         hr->rem_set()->is_complete();
}

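// Rebuilds the candidate list from scratch: clear the old state, size the
// array for parallel addition, let the work gang scan the heap regions in
// parallel, and finally sort the result by GC efficiency.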
void CollectionSetChooser::rebuild(WorkGang* workers, uint n_regions) {
  clear();

  uint n_workers = workers->active_workers();

  uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions);
  prepare_for_par_region_addition(n_workers, n_regions, chunk_size);

  ParKnownGarbageTask par_known_garbage_task(this, chunk_size, n_workers);
  workers->run_task(&par_known_garbage_task);

  sort_regions();
}
--- EOF ---