/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "memory/allocation.hpp"

void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,
                                   G1RegionToSpaceMapper* prev_bitmap,
                                   G1RegionToSpaceMapper* next_bitmap,
                                   G1RegionToSpaceMapper* bot,
                                   G1RegionToSpaceMapper* cardtable,
                                   G1RegionToSpaceMapper* card_counts) {
  _allocated_heapregions_length = 0;

  _heap_mapper = heap_storage;

  _prev_bitmap_mapper = prev_bitmap;
  _next_bitmap_mapper = next_bitmap;

  _bot_mapper = bot;
  _cardtable_mapper = cardtable;

  _card_counts_mapper = card_counts;

  MemRegion reserved = heap_storage->reserved();
  _regions.initialize(reserved.start(), reserved.end(), HeapRegion::GrainBytes);

  _available_map.resize(_regions.length(), false);
  _available_map.clear();
}

bool HeapRegionManager::is_available(uint region) const {
  return _available_map.at(region);
}

#ifdef ASSERT
bool HeapRegionManager::is_free(HeapRegion* hr) const {
  return _free_list.contains(hr);
}
#endif

HeapRegion* HeapRegionManager::new_heap_region(uint hrm_index) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  HeapWord* bottom = g1h->bottom_addr_for_region(hrm_index);
  MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
  assert(reserved().contains(mr), "invariant");
  return g1h->allocator()->new_heap_region(hrm_index, g1h->bot_shared(), mr);
}

void HeapRegionManager::commit_regions(uint index, size_t num_regions) {
  guarantee(num_regions > 0, "Must commit more than zero regions");
  guarantee(_num_committed + num_regions <= max_length(), "Cannot commit more than the maximum number of regions");

  _num_committed += (uint)num_regions;

  _heap_mapper->commit_regions(index, num_regions);

  // Also commit auxiliary data
  _prev_bitmap_mapper->commit_regions(index, num_regions);
  _next_bitmap_mapper->commit_regions(index, num_regions);

  _bot_mapper->commit_regions(index, num_regions);
  _cardtable_mapper->commit_regions(index, num_regions);

  _card_counts_mapper->commit_regions(index, num_regions);
}
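// All of the mappers above cover the same region index space, so making
// heap region [index, index + num_regions) usable means committing the
// matching slices of the marking bitmaps, the block offset table, the
// card table and the card counts table as well. That is why
// commit_regions() fans out to every mapper, and why uncommit_regions()
// below mirrors it exactly.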
void HeapRegionManager::uncommit_regions(uint start, size_t num_regions) {
  guarantee(num_regions >= 1, err_msg("Need to specify at least one region to uncommit, tried to uncommit zero regions at %u", start));
  guarantee(_num_committed >= num_regions, "pre-condition");

  // Print before uncommitting.
  if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
    for (uint i = start; i < start + num_regions; i++) {
      HeapRegion* hr = at(i);
      G1CollectedHeap::heap()->hr_printer()->uncommit(hr->bottom(), hr->end());
    }
  }

  _num_committed -= (uint)num_regions;

  _available_map.par_clear_range(start, start + num_regions, BitMap::unknown_range);
  _heap_mapper->uncommit_regions(start, num_regions);

  // Also uncommit auxiliary data
  _prev_bitmap_mapper->uncommit_regions(start, num_regions);
  _next_bitmap_mapper->uncommit_regions(start, num_regions);

  _bot_mapper->uncommit_regions(start, num_regions);
  _cardtable_mapper->uncommit_regions(start, num_regions);

  _card_counts_mapper->uncommit_regions(start, num_regions);
}

void HeapRegionManager::make_regions_available(uint start, uint num_regions) {
  guarantee(num_regions > 0, "No point in calling this for zero regions");
  commit_regions(start, num_regions);
  for (uint i = start; i < start + num_regions; i++) {
    if (_regions.get_by_index(i) == NULL) {
      HeapRegion* new_hr = new_heap_region(i);
      _regions.set_by_index(i, new_hr);
      _allocated_heapregions_length = MAX2(_allocated_heapregions_length, i + 1);
    }
  }

  _available_map.par_set_range(start, start + num_regions, BitMap::unknown_range);

  for (uint i = start; i < start + num_regions; i++) {
    assert(is_available(i), err_msg("Just made region %u available but it is apparently not.", i));
    HeapRegion* hr = at(i);
    if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
      G1CollectedHeap::heap()->hr_printer()->commit(hr->bottom(), hr->end());
    }
    HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(i);
    MemRegion mr(bottom, bottom + HeapRegion::GrainWords);

    hr->initialize(mr);
    insert_into_free_list(at(i));
  }
}

uint HeapRegionManager::expand_by(uint num_regions) {
  return expand_at(0, num_regions);
}

uint HeapRegionManager::expand_at(uint start, uint num_regions) {
  if (num_regions == 0) {
    return 0;
  }

  uint cur = start;
  uint idx_last_found = 0;
  uint num_last_found = 0;

  uint expanded = 0;

  while (expanded < num_regions &&
         (num_last_found = find_unavailable_from_idx(cur, &idx_last_found)) > 0) {
    uint to_expand = MIN2(num_regions - expanded, num_last_found);
    make_regions_available(idx_last_found, to_expand);
    expanded += to_expand;
    cur = idx_last_found + num_last_found + 1;
  }

  verify_optional();
  return expanded;
}
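// A worked example of the expansion loop above, assuming a hypothetical
// availability map (A = committed/available, . = uncommitted) and a
// call of expand_at(0, 3):
//
//   index:  0 1 2 3 4 5 6 7
//   state:  A A . . A . . A
//
// The first call to find_unavailable_from_idx() returns the gap [2,3],
// which is made available in full (2 regions); the search then resumes
// past that gap, and region 5 out of the gap [5,6] supplies the third
// region. The return value is the number of regions actually made
// available, which may be smaller than num_regions when the heap runs
// out of uncommitted regions.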
uint HeapRegionManager::find_contiguous(size_t num, bool empty_only) {
  uint found = 0;
  size_t length_found = 0;
  uint cur = 0;

  while (length_found < num && cur < max_length()) {
    HeapRegion* hr = _regions.get_by_index(cur);
    if ((!empty_only && !is_available(cur)) || (is_available(cur) && hr != NULL && hr->is_empty())) {
      // This region is a potential candidate for allocation into.
      length_found++;
    } else {
      // This region is not a candidate. The next region is the next possible one.
      found = cur + 1;
      length_found = 0;
    }
    cur++;
  }

  if (length_found == num) {
    for (uint i = found; i < (found + num); i++) {
      HeapRegion* hr = _regions.get_by_index(i);
      // sanity check
      guarantee((!empty_only && !is_available(i)) || (is_available(i) && hr != NULL && hr->is_empty()),
                err_msg("Found region sequence starting at " UINT32_FORMAT ", length " SIZE_FORMAT
                        " that is not empty at " UINT32_FORMAT ". Hr is " PTR_FORMAT, found, num, i, p2i(hr)));
    }
    return found;
  } else {
    return G1_NO_HRM_INDEX;
  }
}

HeapRegion* HeapRegionManager::next_region_in_heap(const HeapRegion* r) const {
  guarantee(r != NULL, "Start region must be a valid region");
  guarantee(is_available(r->hrm_index()), err_msg("Trying to iterate starting from region %u which is not in the heap", r->hrm_index()));
  for (uint i = r->hrm_index() + 1; i < _allocated_heapregions_length; i++) {
    HeapRegion* hr = _regions.get_by_index(i);
    if (is_available(i)) {
      return hr;
    }
  }
  return NULL;
}

void HeapRegionManager::iterate(HeapRegionClosure* blk) const {
  uint len = max_length();

  for (uint i = 0; i < len; i++) {
    if (!is_available(i)) {
      continue;
    }
    guarantee(at(i) != NULL, err_msg("Tried to access region %u that has a NULL HeapRegion*", i));
    bool res = blk->doHeapRegion(at(i));
    if (res) {
      blk->incomplete();
      return;
    }
  }
}

uint HeapRegionManager::find_unavailable_from_idx(uint start_idx, uint* res_idx) const {
  guarantee(res_idx != NULL, "checking");
  guarantee(start_idx <= (max_length() + 1), "checking");

  uint num_regions = 0;

  uint cur = start_idx;
  while (cur < max_length() && is_available(cur)) {
    cur++;
  }
  if (cur == max_length()) {
    return num_regions;
  }
  *res_idx = cur;
  while (cur < max_length() && !is_available(cur)) {
    cur++;
  }
  num_regions = cur - *res_idx;
#ifdef ASSERT
  for (uint i = *res_idx; i < (*res_idx + num_regions); i++) {
    assert(!is_available(i), "just checking");
  }
  assert(cur == max_length() || num_regions == 0 || is_available(cur),
         err_msg("The region at the current position %u must be available or at the end of the heap.", cur));
#endif
  return num_regions;
}
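// find_contiguous() above is the allocation-side counterpart of these
// search helpers: with empty_only == false, uncommitted regions are
// acceptable candidates (the caller can commit them on demand), while
// with empty_only == true only committed, empty regions qualify. For a
// hypothetical state (E = committed empty, U = committed in use,
// . = uncommitted):
//
//   index:  0 1 2 3 4
//   state:  E U . . U
//
// find_contiguous(2, true) returns G1_NO_HRM_INDEX (no two adjacent
// committed empty regions exist), while find_contiguous(2, false)
// returns 2, the start of the uncommitted run [2,3].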
void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer, bool concurrent) const {
  const uint start_index = hrclaimer->start_region_for_worker(worker_id);

  // Every worker will actually look at all regions, skipping over regions that
  // are currently not committed.
  // This also (potentially) iterates over regions newly allocated during GC. This
  // is no problem except for some extra work.
  const uint n_regions = hrclaimer->n_regions();
  for (uint count = 0; count < n_regions; count++) {
    const uint index = (start_index + count) % n_regions;
    assert(index < n_regions, "sanity");
    // Skip over unavailable regions
    if (!is_available(index)) {
      continue;
    }
    HeapRegion* r = _regions.get_by_index(index);
    // We'll ignore "continues humongous" regions (we'll process them
    // when we come across their corresponding "start humongous"
    // region) and regions already claimed.
    // However, if the iteration is specified as concurrent, the values for
    // is_starts_humongous and is_continues_humongous cannot be trusted,
    // and we should just blindly iterate over regions regardless of their
    // humongous status.
    if (hrclaimer->is_region_claimed(index) || (!concurrent && r->is_continues_humongous())) {
      continue;
    }
    // OK, try to claim it
    if (!hrclaimer->claim_region(index)) {
      continue;
    }
    // Success!
    // As mentioned above, special treatment of humongous regions can only be
    // done if we are iterating non-concurrently.
    if (!concurrent && r->is_starts_humongous()) {
      // If the region is "starts humongous", we iterate over its
      // "continues humongous" regions first. The order is important:
      // calling the closure on the "starts humongous" region might
      // de-allocate and clear all its "continues humongous" regions
      // and, as a result, we might end up processing them twice. So we
      // do them first (note: most closures will ignore them anyway) and
      // then we do the "starts humongous" region.
      for (uint ch_index = index + 1; ch_index < index + r->region_num(); ch_index++) {
        HeapRegion* chr = _regions.get_by_index(ch_index);

        assert(chr->is_continues_humongous(), "Must be humongous region");
        assert(chr->humongous_start_region() == r,
               err_msg("Must work on humongous continuation of the original start region "
                       PTR_FORMAT ", but is " PTR_FORMAT, p2i(r), p2i(chr)));
        assert(!hrclaimer->is_region_claimed(ch_index),
               "Must not have been claimed yet because claiming of humongous continuation first claims the start region");

        // There's no need to actually claim the continues humongous region, but we can do it in an assert as an extra precaution.
        assert(hrclaimer->claim_region(ch_index), "We should always be able to claim the continuesHumongous part of the humongous object");

        bool res2 = blk->doHeapRegion(chr);
        if (res2) {
          return;
        }

        // Right now, this holds (i.e., no closure that actually
        // does something with "continues humongous" regions
        // clears them). We might have to weaken it in the future,
        // but let's leave these two asserts here for extra safety.
        assert(chr->is_continues_humongous(), "should still be the case");
        assert(chr->humongous_start_region() == r, "sanity");
      }
    }

    bool res = blk->doHeapRegion(r);
    if (res) {
      return;
    }
  }
}
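// A minimal sketch of how a parallel GC task might drive the iteration
// above (illustrative only; "CountClosure" is a hypothetical closure,
// and the call normally goes through a G1CollectedHeap wrapper rather
// than directly through the manager):
//
//   class CountClosure : public HeapRegionClosure {
//     size_t _count;
//    public:
//     CountClosure() : _count(0) { }
//     bool doHeapRegion(HeapRegion* r) {
//       _count++;
//       return false;  // returning true would abort the iteration
//     }
//   };
//
//   // Shared across all workers, sized from the current heap:
//   HeapRegionClaimer claimer(n_workers);
//   // Executed by each worker thread with its own worker_id:
//   CountClosure cl;
//   par_iterate(&cl, worker_id, &claimer, false /* concurrent */);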
uint HeapRegionManager::shrink_by(uint num_regions_to_remove) {
  assert(length() > 0, "the region sequence should not be empty");
  assert(length() <= _allocated_heapregions_length, "invariant");
  assert(_allocated_heapregions_length > 0, "we should have at least one region committed");
  assert(num_regions_to_remove < length(), "We should never remove all regions");

  if (num_regions_to_remove == 0) {
    return 0;
  }

  uint removed = 0;
  uint cur = _allocated_heapregions_length - 1;
  uint idx_last_found = 0;
  uint num_last_found = 0;

  while ((removed < num_regions_to_remove) &&
         (num_last_found = find_empty_from_idx_reverse(cur, &idx_last_found)) > 0) {
    uint to_remove = MIN2(num_regions_to_remove - removed, num_last_found);

    uncommit_regions(idx_last_found + num_last_found - to_remove, to_remove);

    cur -= num_last_found;
    removed += to_remove;
  }

  verify_optional();

  return removed;
}

uint HeapRegionManager::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const {
  guarantee(start_idx < _allocated_heapregions_length, "checking");
  guarantee(res_idx != NULL, "checking");

  uint num_regions_found = 0;

  jlong cur = start_idx;
  while (cur != -1 && !(is_available(cur) && at(cur)->is_empty())) {
    cur--;
  }
  if (cur == -1) {
    return num_regions_found;
  }
  jlong old_cur = cur;
  // cur now indexes the topmost empty region at or below start_idx;
  // scan downwards to find the bottom of this run of empty regions.
  while (cur != -1 && is_available(cur) && at(cur)->is_empty()) {
    cur--;
  }
  *res_idx = cur + 1;
  num_regions_found = old_cur - cur;

#ifdef ASSERT
  for (uint i = *res_idx; i < (*res_idx + num_regions_found); i++) {
    assert(at(i)->is_empty(), "just checking");
  }
#endif
  return num_regions_found;
}
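// Note that shrink_by() contracts each run of empty regions from its
// high end: the uncommit above starts at
// idx_last_found + num_last_found - to_remove, so when a run is larger
// than what remains to be removed, only its upper regions are
// uncommitted and the lower part of the run stays committed and on the
// free list.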
void HeapRegionManager::verify() {
  guarantee(length() <= _allocated_heapregions_length,
            err_msg("invariant: _length: %u _allocated_length: %u",
                    length(), _allocated_heapregions_length));
  guarantee(_allocated_heapregions_length <= max_length(),
            err_msg("invariant: _allocated_length: %u _max_length: %u",
                    _allocated_heapregions_length, max_length()));

  bool prev_committed = true;
  uint num_committed = 0;
  HeapWord* prev_end = heap_bottom();
  for (uint i = 0; i < _allocated_heapregions_length; i++) {
    if (!is_available(i)) {
      prev_committed = false;
      continue;
    }
    num_committed++;
    HeapRegion* hr = _regions.get_by_index(i);
    guarantee(hr != NULL, err_msg("invariant: i: %u", i));
    guarantee(!prev_committed || hr->bottom() == prev_end,
              err_msg("invariant i: %u " HR_FORMAT " prev_end: " PTR_FORMAT,
                      i, HR_FORMAT_PARAMS(hr), p2i(prev_end)));
    guarantee(hr->hrm_index() == i,
              err_msg("invariant: i: %u hrm_index(): %u", i, hr->hrm_index()));
    // Asserts will fire if i is >= _length
    HeapWord* addr = hr->bottom();
    guarantee(addr_to_region(addr) == hr, "sanity");
    // We cannot check whether the region is part of a particular set: at the
    // time this method may be called, we have only completed allocation of the
    // regions, but not yet put them into a region set.
    prev_committed = true;
    if (hr->is_starts_humongous()) {
      prev_end = hr->orig_end();
    } else {
      prev_end = hr->end();
    }
  }
  for (uint i = _allocated_heapregions_length; i < max_length(); i++) {
    guarantee(_regions.get_by_index(i) == NULL, err_msg("invariant i: %u", i));
  }

  guarantee(num_committed == _num_committed, err_msg("Found %u committed regions, but should be %u", num_committed, _num_committed));
  _free_list.verify();
}

#ifndef PRODUCT
void HeapRegionManager::verify_optional() {
  verify();
}
#endif // PRODUCT

HeapRegionClaimer::HeapRegionClaimer(uint n_workers) :
    _n_workers(n_workers), _n_regions(G1CollectedHeap::heap()->_hrm._allocated_heapregions_length), _claims(NULL) {
  assert(n_workers > 0, "Need at least one worker.");
  _claims = NEW_C_HEAP_ARRAY(uint, _n_regions, mtGC);
  memset(_claims, Unclaimed, sizeof(*_claims) * _n_regions);
}

HeapRegionClaimer::~HeapRegionClaimer() {
  if (_claims != NULL) {
    FREE_C_HEAP_ARRAY(uint, _claims);
  }
}

uint HeapRegionClaimer::start_region_for_worker(uint worker_id) const {
  assert(worker_id < _n_workers, "Invalid worker_id.");
  return _n_regions * worker_id / _n_workers;
}

bool HeapRegionClaimer::is_region_claimed(uint region_index) const {
  assert(region_index < _n_regions, "Invalid index.");
  return _claims[region_index] == Claimed;
}

bool HeapRegionClaimer::claim_region(uint region_index) {
  assert(region_index < _n_regions, "Invalid index.");
  uint old_val = Atomic::cmpxchg(Claimed, &_claims[region_index], Unclaimed);
  return old_val == Unclaimed;
}
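// start_region_for_worker() spaces the workers evenly across the region
// index space: with, say, _n_regions == 10 and _n_workers == 4 the
// starting indices are 0, 2, 5 and 7 (_n_regions * worker_id / _n_workers
// with integer division). Each worker then wraps around modulo
// _n_regions in par_iterate(), so every worker visits every index, but
// the cmpxchg in claim_region() ensures each region is claimed, and
// therefore processed, exactly once.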