/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/collectionSetChooser.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/space.inline.hpp"
#include "runtime/atomic.hpp"

// Even though we don't use the GC efficiency in our heuristics as
// much as we used to, we still order according to GC efficiency. This
// will cause regions with a lot of live objects and large RSets to
// end up at the end of the array. Given that we might skip collecting
// the last few old regions, if after a few mixed GCs the remaining
// ones have reclaimable bytes under a certain threshold, the hope is
// that the ones we'll skip are ones with both large RSets and a lot
// of live objects, not the ones with just a lot of live objects, as
// would happen if we ordered according to the amount of reclaimable
// bytes per region.
static int order_regions(HeapRegion* hr1, HeapRegion* hr2) {
  // NULL entries sort towards the end of the array.
  if (hr1 == NULL) {
    if (hr2 == NULL) {
      return 0;
    } else {
      return 1;
    }
  } else if (hr2 == NULL) {
    return -1;
  }

  if (hr1->is_premature_old() && !hr2->is_premature_old()) {
    return -1;
  }

  if (hr2->is_premature_old() && !hr1->is_premature_old()) {
    return 1;
  }

  double gc_eff1 = hr1->gc_efficiency();
  double gc_eff2 = hr2->gc_efficiency();
  if (gc_eff1 > gc_eff2) {
    return -1;
  } else if (gc_eff1 < gc_eff2) {
    return 1;
  } else {
    return 0;
  }
}
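// Adapter for GrowableArray::sort(), which hands the comparator
// pointers to the array elements (HeapRegion**) rather than the
// elements themselves.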
static int order_regions(HeapRegion** hr1p, HeapRegion** hr2p) {
  return order_regions(*hr1p, *hr2p);
}

CollectionSetChooser::CollectionSetChooser() :
  // The line below is the worst bit of C++ hackery I've ever written
  // (Detlefs, 11/23). You should think of it as equivalent to
  // "_regions(100, true)": initialize the growable array and inform it
  // that it should allocate its elem array(s) on the C heap.
  //
  // The first argument, however, is actually a comma expression
  // (set_allocation_type(this, C_HEAP), 100). The purpose of the
  // set_allocation_type() call is to replace the default allocation
  // type for embedded objects, STACK_OR_EMBEDDED, with C_HEAP. This
  // allows the assert in GenericGrowableArray(), which checks that a
  // growable array object must be on the C heap if its elements are,
  // to pass.
  //
  // Note: the containing object is allocated on the C heap since it is
  // a CHeapObj.
  //
  _regions((ResourceObj::set_allocation_type((address) &_regions,
                                             ResourceObj::C_HEAP),
            100), true /* C_Heap */),
    _front(0), _end(0), _first_par_unreserved_idx(0),
    _region_live_threshold_bytes(0), _remaining_reclaimable_bytes(0) {
  _region_live_threshold_bytes = mixed_gc_live_threshold_bytes();
}

#ifndef PRODUCT
void CollectionSetChooser::verify() {
  guarantee(_end <= regions_length(), "_end: %u regions length: %u", _end, regions_length());
  guarantee(_front <= _end, "_front: %u _end: %u", _front, _end);
  uint index = 0;
  size_t sum_of_reclaimable_bytes = 0;
  while (index < _front) {
    guarantee(regions_at(index) == NULL,
              "all entries before _front should be NULL");
    index += 1;
  }
  HeapRegion *prev = NULL;
  while (index < _end) {
    HeapRegion *curr = regions_at(index++);
    guarantee(curr != NULL, "Regions in _regions array cannot be NULL");
    guarantee(!curr->is_young(), "should not be young!");
    guarantee(!curr->is_pinned(),
              "Pinned region should not be in collection set (index %u)", curr->hrm_index());
    if (prev != NULL) {
      guarantee(order_regions(prev, curr) != 1,
                "GC eff prev: %1.4f GC eff curr: %1.4f",
                prev->gc_efficiency(), curr->gc_efficiency());
    }
    sum_of_reclaimable_bytes += curr->reclaimable_bytes();
    prev = curr;
  }
  guarantee(sum_of_reclaimable_bytes == _remaining_reclaimable_bytes,
            "reclaimable bytes inconsistent, "
            "remaining: " SIZE_FORMAT " sum: " SIZE_FORMAT,
            _remaining_reclaimable_bytes, sum_of_reclaimable_bytes);
}
#endif // !PRODUCT

void CollectionSetChooser::sort_regions() {
  // First trim any unused portion of the top in the parallel case.
  if (_first_par_unreserved_idx > 0) {
    assert(_first_par_unreserved_idx <= regions_length(),
           "Or we didn't reserve enough length");
    regions_trunc_to(_first_par_unreserved_idx);
  }
  _regions.sort(order_regions);
  assert(_end <= regions_length(), "Requirement");
#ifdef ASSERT
  for (uint i = 0; i < _end; i++) {
    assert(regions_at(i) != NULL, "Should be true by sorting!");
  }
#endif // ASSERT
  if (log_is_enabled(Trace, gc, liveness)) {
    G1PrintRegionLivenessInfoClosure cl("Post-Sorting");
    for (uint i = 0; i < _end; ++i) {
      HeapRegion* r = regions_at(i);
      cl.do_heap_region(r);
    }
  }
  verify();
}

void CollectionSetChooser::add_region(HeapRegion* hr) {
  assert(!hr->is_pinned(),
         "Pinned region shouldn't be added to the collection set (index %u)", hr->hrm_index());
  assert(hr->is_old(), "should be old but is %s", hr->get_type_str());
  assert(hr->rem_set()->is_complete(),
         "Trying to add region %u to the collection set with incomplete remembered set", hr->hrm_index());
  _regions.append(hr);
  _end++;
  _remaining_reclaimable_bytes += hr->reclaimable_bytes();
  hr->calc_gc_efficiency();
}

void CollectionSetChooser::push(HeapRegion* hr) {
  assert(hr != NULL, "Can't put back a NULL region");
  assert(_front >= 1, "Too many regions have been put back");
  _front--;
  regions_at_put(_front, hr);
  _remaining_reclaimable_bytes += hr->reclaimable_bytes();
}
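// Reserve space for parallel region addition: the array is grown up
// front so that each worker can claim fixed-size chunks of slots with
// a single atomic bump of _first_par_unreserved_idx and then fill its
// chunk without further synchronization. Up to n_threads * chunk_size
// slots may go unused (the "max_waste" below); the unused tail is
// trimmed off again in sort_regions().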
void CollectionSetChooser::prepare_for_par_region_addition(uint n_threads,
                                                           uint n_regions,
                                                           uint chunk_size) {
  _first_par_unreserved_idx = 0;
  uint max_waste = n_threads * chunk_size;
  // Round n_regions up to a multiple of chunk_size.
  uint aligned_n_regions = (n_regions + chunk_size - 1) / chunk_size * chunk_size;
  assert(aligned_n_regions % chunk_size == 0, "should be aligned");
  regions_at_put_grow(aligned_n_regions + max_waste - 1, NULL);
}

uint CollectionSetChooser::claim_array_chunk(uint chunk_size) {
  // Atomic::add() returns the updated value, so the claimed chunk
  // starts at res - chunk_size.
  uint res = (uint) Atomic::add((jint) chunk_size,
                                (volatile jint*) &_first_par_unreserved_idx);
  assert(regions_length() > res + chunk_size - 1,
         "Should already have been expanded");
  return res - chunk_size;
}

void CollectionSetChooser::set_region(uint index, HeapRegion* hr) {
  assert(regions_at(index) == NULL, "precondition");
  assert(hr->is_old(), "should be old but is %s", hr->get_type_str());
  regions_at_put(index, hr);
  hr->calc_gc_efficiency();
}

void CollectionSetChooser::update_totals(uint region_num,
                                         size_t reclaimable_bytes) {
  // Only take the lock if we actually need to update the totals.
  if (region_num > 0) {
    assert(reclaimable_bytes > 0, "invariant");
    // We could have just used atomics instead of taking the
    // lock. However, we currently don't have an atomic add for size_t.
    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
    _end += region_num;
    _remaining_reclaimable_bytes += reclaimable_bytes;
  } else {
    assert(reclaimable_bytes == 0, "invariant");
  }
}

void CollectionSetChooser::iterate(HeapRegionClosure* cl) {
  for (uint i = _front; i < _end; i++) {
    HeapRegion* r = regions_at(i);
    if (cl->do_heap_region(r)) {
      cl->set_incomplete();
      break;
    }
  }
}

void CollectionSetChooser::clear() {
  _regions.clear();
  _front = 0;
  _end = 0;
  _remaining_reclaimable_bytes = 0;
}
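// Closure applied to every heap region during rebuild(): regions that
// pass should_add() (and are not the current old GC alloc region) are
// registered with the chooser; old regions that are not added have
// their remembered set card sets cleared, since they will not be
// collected.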
class ParKnownGarbageHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  CSetChooserParUpdater _cset_updater;

public:
  ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
                           uint chunk_size) :
    _g1h(G1CollectedHeap::heap()),
    _cset_updater(hrSorted, true /* parallel */, chunk_size) { }

  bool do_heap_region(HeapRegion* r) {
    // We will skip any region that's currently used as an old GC
    // alloc region (we should not consider those for collection
    // before we fill them up).
    if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
      _cset_updater.add_region(r);
    } else if (r->is_old()) {
      // Keep remembered sets for humongous regions, otherwise clean out
      // remembered sets for old regions.
      r->rem_set()->clear(true /* only_cardset */);
    } else {
      assert(r->is_archive() || !r->is_old() || !r->rem_set()->is_tracked(),
             "Failed to clear unused remembered set of region %u (%s) that is %s",
             r->hrm_index(), r->get_type_str(), r->rem_set()->get_state_str());
    }
    return false;
  }
};

class ParKnownGarbageTask: public AbstractGangTask {
  CollectionSetChooser* _hrSorted;
  uint _chunk_size;
  G1CollectedHeap* _g1h;
  HeapRegionClaimer _hrclaimer;

public:
  ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size, uint n_workers) :
    AbstractGangTask("ParKnownGarbageTask"),
    _hrSorted(hrSorted), _chunk_size(chunk_size),
    _g1h(G1CollectedHeap::heap()), _hrclaimer(n_workers) {}

  void work(uint worker_id) {
    ParKnownGarbageHRClosure par_known_garbage_cl(_hrSorted, _chunk_size);
    _g1h->heap_region_par_iterate_from_worker_offset(&par_known_garbage_cl, &_hrclaimer, worker_id);
  }
};

uint CollectionSetChooser::calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const {
  assert(n_workers > 0, "Active gc workers should be greater than 0");
  const uint overpartition_factor = 4;
  const uint min_chunk_size = MAX2(n_regions / n_workers, 1U);
  return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size);
}

bool CollectionSetChooser::region_occupancy_low_enough_for_evac(size_t live_bytes) {
  return live_bytes < mixed_gc_live_threshold_bytes();
}

bool CollectionSetChooser::should_add(HeapRegion* hr) const {
  return !hr->is_young() &&
         !hr->is_pinned() &&
         (hr->is_premature_old() || region_occupancy_low_enough_for_evac(hr->live_bytes())) &&
         hr->rem_set()->is_complete();
}

void CollectionSetChooser::rebuild(WorkGang* workers, uint n_regions) {
  clear();

  uint n_workers = workers->active_workers();

  uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions);
  prepare_for_par_region_addition(n_workers, n_regions, chunk_size);

  ParKnownGarbageTask par_known_garbage_task(this, chunk_size, n_workers);
  workers->run_task(&par_known_garbage_task);

  sort_regions();
}