/*
 * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Arguments.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1NUMAStats.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/heterogeneousHeapRegionManager.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "utilities/bitMap.inline.hpp"

class MasterFreeRegionListChecker : public HeapRegionSetChecker {
public:
  void check_mt_safety() {
    // Master Free List MT safety protocol:
    // (a) If we're at a safepoint, operations on the master free list
    // should be invoked by either the VM thread (which will serialize
    // them) or by the GC workers while holding the
    // FreeList_lock.
    // (b) If we're not at a safepoint, operations on the master free
    // list should be invoked while holding the Heap_lock.
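    //
    // For example (hypothetical illustration, not code from this file),
    // a caller outside a safepoint would first take the Heap_lock:
    //
    //   MutexLocker ml(Heap_lock);
    //   _free_list.remove_region(true /* from_head */);
    //
    // The guarantees below check case (a) and case (b) respectively.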
    if (SafepointSynchronize::is_at_safepoint()) {
      guarantee(Thread::current()->is_VM_thread() ||
                FreeList_lock->owned_by_self(), "master free list MT safety protocol at a safepoint");
    } else {
      guarantee(Heap_lock->owned_by_self(), "master free list MT safety protocol outside a safepoint");
    }
  }
  bool is_correct_type(HeapRegion* hr) { return hr->is_free(); }
  const char* get_description() { return "Free Regions"; }
};

HeapRegionRange::HeapRegionRange(uint start, uint end) : _start(start), _end(end) {
  assert(start <= end, "Invariant");
}

HeapRegionManager::HeapRegionManager() :
  _bot_mapper(NULL),
  _cardtable_mapper(NULL),
  _card_counts_mapper(NULL),
  _available_map(mtGC),
  _num_committed(0),
  _allocated_heapregions_length(0),
  _regions(), _heap_mapper(NULL),
  _prev_bitmap_mapper(NULL),
  _next_bitmap_mapper(NULL),
  _free_list("Free list", new MasterFreeRegionListChecker())
{ }

HeapRegionManager* HeapRegionManager::create_manager(G1CollectedHeap* heap) {
  if (G1Arguments::is_heterogeneous_heap()) {
    return new HeterogeneousHeapRegionManager((uint)(G1Arguments::heap_max_size_bytes() / HeapRegion::GrainBytes) /*heap size as num of regions*/);
  }
  return new HeapRegionManager();
}

void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,
                                   G1RegionToSpaceMapper* prev_bitmap,
                                   G1RegionToSpaceMapper* next_bitmap,
                                   G1RegionToSpaceMapper* bot,
                                   G1RegionToSpaceMapper* cardtable,
                                   G1RegionToSpaceMapper* card_counts) {
  _allocated_heapregions_length = 0;

  _heap_mapper = heap_storage;

  _prev_bitmap_mapper = prev_bitmap;
  _next_bitmap_mapper = next_bitmap;

  _bot_mapper = bot;
  _cardtable_mapper = cardtable;

  _card_counts_mapper = card_counts;

  MemRegion reserved = heap_storage->reserved();
  _regions.initialize(reserved.start(), reserved.end(), HeapRegion::GrainBytes);

  _available_map.initialize(_regions.length());
}

bool HeapRegionManager::is_available(uint region) const {
  return _available_map.at(region);
}

HeapRegion* HeapRegionManager::allocate_free_region(HeapRegionType type, uint requested_node_index) {
  HeapRegion* hr = NULL;
  bool from_head = !type.is_young();
  G1NUMA* numa = G1NUMA::numa();

  if (requested_node_index != G1NUMA::AnyNodeIndex && numa->is_enabled()) {
    // Try to allocate with requested node index.
    hr = _free_list.remove_region_with_node_index(from_head, requested_node_index);
  }

  if (hr == NULL) {
    // If there's a single active node or we did not get a region from our requested node,
    // try without requested node index.
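    // Note that this fallback ignores NUMA placement entirely, so the
    // region obtained may live on a different node than requested; the
    // statistics update further below records how often that happens.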
    hr = _free_list.remove_region(from_head);
  }

  if (hr != NULL) {
    assert(hr->next() == NULL, "Single region should not have next");
    assert(is_available(hr->hrm_index()), "Must be committed");

    if (numa->is_enabled() && hr->node_index() < numa->num_active_nodes()) {
      numa->update_statistics(G1NUMAStats::NewRegionAlloc, requested_node_index, hr->node_index());
    }
  }

  return hr;
}

HeapRegion* HeapRegionManager::allocate_humongous_from_free_list(uint num_regions) {
  uint candidate = find_contiguous_in_free_list(num_regions);
  if (candidate == G1_NO_HRM_INDEX) {
    return NULL;
  }
  return allocate_free_regions_starting_at(candidate, num_regions);
}

HeapRegion* HeapRegionManager::allocate_humongous_allow_expand(uint num_regions) {
  uint candidate = find_contiguous_allow_expand(num_regions);
  if (candidate == G1_NO_HRM_INDEX) {
    return NULL;
  }
  expand_exact(candidate, num_regions, G1CollectedHeap::heap()->workers());
  return allocate_free_regions_starting_at(candidate, num_regions);
}

HeapRegion* HeapRegionManager::allocate_humongous(uint num_regions) {
  // Special case a single region to avoid expensive search.
  if (num_regions == 1) {
    return allocate_free_region(HeapRegionType::Humongous, G1NUMA::AnyNodeIndex);
  }
  return allocate_humongous_from_free_list(num_regions);
}

HeapRegion* HeapRegionManager::expand_and_allocate_humongous(uint num_regions) {
  return allocate_humongous_allow_expand(num_regions);
}

#ifdef ASSERT
bool HeapRegionManager::is_free(HeapRegion* hr) const {
  return _free_list.contains(hr);
}
#endif

HeapRegion* HeapRegionManager::new_heap_region(uint hrm_index) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  HeapWord* bottom = g1h->bottom_addr_for_region(hrm_index);
  MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
  assert(reserved().contains(mr), "invariant");
  return g1h->new_heap_region(hrm_index, mr);
}

void HeapRegionManager::commit_regions(uint index, size_t num_regions, WorkGang* pretouch_gang) {
  guarantee(num_regions > 0, "Must commit more than zero regions");
  guarantee(_num_committed + num_regions <= max_length(), "Cannot commit more than the maximum amount of regions");

  _num_committed += (uint)num_regions;

  _heap_mapper->commit_regions(index, num_regions, pretouch_gang);

  // Also commit auxiliary data
  _prev_bitmap_mapper->commit_regions(index, num_regions, pretouch_gang);
  _next_bitmap_mapper->commit_regions(index, num_regions, pretouch_gang);

  _bot_mapper->commit_regions(index, num_regions, pretouch_gang);
  _cardtable_mapper->commit_regions(index, num_regions, pretouch_gang);

  _card_counts_mapper->commit_regions(index, num_regions, pretouch_gang);
}

void HeapRegionManager::uncommit_regions(uint start, size_t num_regions) {
  guarantee(num_regions >= 1, "Need to specify at least one region to uncommit, tried to uncommit zero regions at %u", start);
  guarantee(_num_committed >= num_regions, "pre-condition");

  // Reset the node index to distinguish these regions from committed ones.
  for (uint i = start; i < start + num_regions; i++) {
    at(i)->set_node_index(G1NUMA::UnknownNodeIndex);
  }

  // Print before uncommitting.
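  // (The regions still count as committed at this point, so the printer
  // sees their pre-uncommit state; checking is_active() first avoids
  // looping over the range when region printing is disabled.)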
  if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
    for (uint i = start; i < start + num_regions; i++) {
      HeapRegion* hr = at(i);
      G1CollectedHeap::heap()->hr_printer()->uncommit(hr);
    }
  }

  _num_committed -= (uint)num_regions;

  _available_map.par_clear_range(start, start + num_regions, BitMap::unknown_range);
  _heap_mapper->uncommit_regions(start, num_regions);

  // Also uncommit auxiliary data
  _prev_bitmap_mapper->uncommit_regions(start, num_regions);
  _next_bitmap_mapper->uncommit_regions(start, num_regions);

  _bot_mapper->uncommit_regions(start, num_regions);
  _cardtable_mapper->uncommit_regions(start, num_regions);

  _card_counts_mapper->uncommit_regions(start, num_regions);
}

void HeapRegionManager::make_regions_available(uint start, uint num_regions, WorkGang* pretouch_gang) {
  guarantee(num_regions > 0, "No point in calling this for zero regions");
  commit_regions(start, num_regions, pretouch_gang);
  for (uint i = start; i < start + num_regions; i++) {
    if (_regions.get_by_index(i) == NULL) {
      HeapRegion* new_hr = new_heap_region(i);
      OrderAccess::storestore();
      _regions.set_by_index(i, new_hr);
      _allocated_heapregions_length = MAX2(_allocated_heapregions_length, i + 1);
    }
  }

  _available_map.par_set_range(start, start + num_regions, BitMap::unknown_range);

  for (uint i = start; i < start + num_regions; i++) {
    assert(is_available(i), "Just made region %u available but is apparently not.", i);
    HeapRegion* hr = at(i);
    if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
      G1CollectedHeap::heap()->hr_printer()->commit(hr);
    }

    hr->initialize();
    hr->set_node_index(G1NUMA::numa()->index_for_region(hr));
    insert_into_free_list(at(i));
  }
}

MemoryUsage HeapRegionManager::get_auxiliary_data_memory_usage() const {
  size_t used_sz =
    _prev_bitmap_mapper->committed_size() +
    _next_bitmap_mapper->committed_size() +
    _bot_mapper->committed_size() +
    _cardtable_mapper->committed_size() +
    _card_counts_mapper->committed_size();

  size_t committed_sz =
    _prev_bitmap_mapper->reserved_size() +
    _next_bitmap_mapper->reserved_size() +
    _bot_mapper->reserved_size() +
    _cardtable_mapper->reserved_size() +
    _card_counts_mapper->reserved_size();

  return MemoryUsage(0, used_sz, committed_sz, committed_sz);
}

uint HeapRegionManager::expand_by(uint num_regions, WorkGang* pretouch_workers) {
  return expand_at(0, num_regions, pretouch_workers);
}

uint HeapRegionManager::expand_at(uint start, uint num_regions, WorkGang* pretouch_workers) {
  if (num_regions == 0) {
    return 0;
  }

  uint offset = start;
  uint expanded = 0;

  do {
    HeapRegionRange regions = find_unavailable_from_idx(offset);
    if (regions.length() == 0) {
      // No more unavailable regions.
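      // We may therefore stop short of the request; the caller can see
      // the actual number of regions expanded in the return value.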
      break;
    }

    uint to_expand = MIN2(num_regions - expanded, regions.length());
    make_regions_available(regions.start(), to_expand, pretouch_workers);
    expanded += to_expand;
    offset = regions.end();
  } while (expanded < num_regions);

  verify_optional();
  return expanded;
}

void HeapRegionManager::expand_exact(uint start, uint num_regions, WorkGang* pretouch_workers) {
  assert(num_regions != 0, "Need to request at least one region");
  uint end = start + num_regions;

  for (uint i = start; i < end; i++) {
    if (!is_available(i)) {
      make_regions_available(i, 1, pretouch_workers);
    }
  }

  verify_optional();
}

uint HeapRegionManager::expand_on_preferred_node(uint preferred_index) {
  uint expand_candidate = UINT_MAX;
  for (uint i = 0; i < max_length(); i++) {
    if (is_available(i)) {
      // Already in use, continue.
      continue;
    }
    // Always save the candidate so we can expand later on.
    expand_candidate = i;
    if (is_on_preferred_index(expand_candidate, preferred_index)) {
      // We have found a candidate on the preferred node, break.
      break;
    }
  }

  if (expand_candidate == UINT_MAX) {
    // No regions left, expand failed.
    return 0;
  }

  expand_exact(expand_candidate, 1, NULL);
  return 1;
}

bool HeapRegionManager::is_on_preferred_index(uint region_index, uint preferred_node_index) {
  uint region_node_index = G1NUMA::numa()->preferred_node_index_for_index(region_index);
  return region_node_index == preferred_node_index;
}

#ifdef ASSERT
void HeapRegionManager::assert_contiguous_range(uint start, uint num_regions) {
  // General sanity check: regions found should either be available and empty,
  // or not available so that we can make them available and use them.
  for (uint i = start; i < (start + num_regions); i++) {
    HeapRegion* hr = _regions.get_by_index(i);
    assert(!is_available(i) || hr->is_free(),
           "Found region sequence starting at " UINT32_FORMAT ", length " UINT32_FORMAT
           " that is not free at " UINT32_FORMAT ". Hr is " PTR_FORMAT ", type is %s",
           start, num_regions, i, p2i(hr), hr->get_type_str());
  }
}
#endif

uint HeapRegionManager::find_contiguous_in_range(uint start, uint end, uint num_regions) {
  assert(start <= end, "precondition");
  assert(num_regions >= 1, "precondition");
  uint candidate = start;       // First region in candidate sequence.
  uint unchecked = candidate;   // First unchecked region in candidate.
  // While the candidate sequence fits in the range...
  while (num_regions <= (end - candidate)) {
    // Walk backward over the regions for the current candidate.
    for (uint i = candidate + num_regions - 1; true; --i) {
      if (is_available(i) && !at(i)->is_free()) {
        // Region i can't be used, so restart with i+1 as the start
        // of a new candidate sequence, and with the region after the
        // old candidate sequence being the first unchecked region.
        unchecked = candidate + num_regions;
        candidate = i + 1;
        break;
      } else if (i == unchecked) {
        // All regions of candidate sequence have passed check.
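        // (Regions below unchecked were already vetted while examining an
        // earlier, overlapping candidate, so reaching unchecked without a
        // failure means the whole sequence is usable. Illustration: for
        // num_regions == 3 and candidate == 2, a used region at index 2
        // moves candidate to 3 and unchecked to 5; regions 3 and 4 are
        // then not re-examined.)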
        assert_contiguous_range(candidate, num_regions);
        return candidate;
      }
    }
  }
  return G1_NO_HRM_INDEX;
}

uint HeapRegionManager::find_contiguous_in_free_list(uint num_regions) {
  BitMap::idx_t range_start = 0;
  BitMap::idx_t range_end = range_start;
  uint candidate = G1_NO_HRM_INDEX;

  do {
    range_start = _available_map.get_next_one_offset(range_end);
    range_end = _available_map.get_next_zero_offset(range_start);
    candidate = find_contiguous_in_range((uint) range_start, (uint) range_end, num_regions);
  } while (candidate == G1_NO_HRM_INDEX && range_end < max_length());

  return candidate;
}

uint HeapRegionManager::find_contiguous_allow_expand(uint num_regions) {
  // Find any candidate.
  return find_contiguous_in_range(0, max_length(), num_regions);
}

HeapRegion* HeapRegionManager::next_region_in_heap(const HeapRegion* r) const {
  guarantee(r != NULL, "Start region must be a valid region");
  guarantee(is_available(r->hrm_index()), "Trying to iterate starting from region %u which is not in the heap", r->hrm_index());
  for (uint i = r->hrm_index() + 1; i < _allocated_heapregions_length; i++) {
    HeapRegion* hr = _regions.get_by_index(i);
    if (is_available(i)) {
      return hr;
    }
  }
  return NULL;
}

void HeapRegionManager::iterate(HeapRegionClosure* blk) const {
  uint len = max_length();

  for (uint i = 0; i < len; i++) {
    if (!is_available(i)) {
      continue;
    }
    guarantee(at(i) != NULL, "Tried to access region %u that has a NULL HeapRegion*", i);
    bool res = blk->do_heap_region(at(i));
    if (res) {
      blk->set_incomplete();
      return;
    }
  }
}

HeapRegionRange HeapRegionManager::find_unavailable_from_idx(uint index) const {
  guarantee(index <= max_length(), "checking");

  // Find first unavailable region from offset.
  BitMap::idx_t start = _available_map.get_next_zero_offset(index);
  if (start == _available_map.size()) {
    // No unavailable regions found.
    return HeapRegionRange(max_length(), max_length());
  }

  // The end of the range is the next available region.
  BitMap::idx_t end = _available_map.get_next_one_offset(start);

  assert(!_available_map.at(start), "Found region (" SIZE_FORMAT ") is not unavailable", start);
  assert(!_available_map.at(end - 1), "Last region (" SIZE_FORMAT ") in range is not unavailable", end - 1);
  assert(end == _available_map.size() || _available_map.at(end), "Region (" SIZE_FORMAT ") is not available", end);

  return HeapRegionRange((uint) start, (uint) end);
}

uint HeapRegionManager::find_highest_free(bool* expanded) {
  // Loop downwards from the highest region index, looking for an
  // entry which is either free or not yet committed. If not yet
  // committed, expand_at that index.
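  // On success, *expanded tells the caller whether the returned region had
  // to be committed first; G1_NO_HRM_INDEX is returned if no free or
  // uncommitted region exists anywhere in the heap.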
  uint curr = max_length() - 1;
  while (true) {
    HeapRegion* hr = _regions.get_by_index(curr);
    if (hr == NULL || !is_available(curr)) {
      uint res = expand_at(curr, 1, NULL);
      if (res == 1) {
        *expanded = true;
        return curr;
      }
    } else {
      if (hr->is_free()) {
        *expanded = false;
        return curr;
      }
    }
    if (curr == 0) {
      return G1_NO_HRM_INDEX;
    }
    curr--;
  }
}

bool HeapRegionManager::allocate_containing_regions(MemRegion range, size_t* commit_count, WorkGang* pretouch_workers) {
  size_t commits = 0;
  uint start_index = (uint)_regions.get_index_by_address(range.start());
  uint last_index = (uint)_regions.get_index_by_address(range.last());

  // Ensure that each G1 region in the range is free, returning false if not.
  // Commit those that are not yet available, and keep count.
  for (uint curr_index = start_index; curr_index <= last_index; curr_index++) {
    if (!is_available(curr_index)) {
      commits++;
      expand_at(curr_index, 1, pretouch_workers);
    }
    HeapRegion* curr_region = _regions.get_by_index(curr_index);
    if (!curr_region->is_free()) {
      return false;
    }
  }

  allocate_free_regions_starting_at(start_index, (last_index - start_index) + 1);
  *commit_count = commits;
  return true;
}

void HeapRegionManager::par_iterate(HeapRegionClosure* blk, HeapRegionClaimer* hrclaimer, const uint start_index) const {
  // Every worker will actually look at all regions, skipping over regions that
  // are currently not committed.
  // This also (potentially) iterates over regions newly allocated during GC. This
  // is no problem except for some extra work.
  const uint n_regions = hrclaimer->n_regions();
  for (uint count = 0; count < n_regions; count++) {
    const uint index = (start_index + count) % n_regions;
    assert(index < n_regions, "sanity");
    // Skip over unavailable regions
    if (!is_available(index)) {
      continue;
    }
    HeapRegion* r = _regions.get_by_index(index);
    // We'll ignore regions already claimed.
    // However, if the iteration is specified as concurrent, the values for
    // is_starts_humongous and is_continues_humongous can not be trusted,
    // and we should just blindly iterate over regions regardless of their
    // humongous status.
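    // The unsynchronized is_region_claimed() check below is a cheap filter;
    // only regions that appear unclaimed go on to the atomic claim_region()
    // call, which decides the actual winner.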
    if (hrclaimer->is_region_claimed(index)) {
      continue;
    }
    // OK, try to claim it
    if (!hrclaimer->claim_region(index)) {
      continue;
    }
    bool res = blk->do_heap_region(r);
    if (res) {
      return;
    }
  }
}

uint HeapRegionManager::shrink_by(uint num_regions_to_remove) {
  assert(length() > 0, "the region sequence should not be empty");
  assert(length() <= _allocated_heapregions_length, "invariant");
  assert(_allocated_heapregions_length > 0, "we should have at least one region committed");
  assert(num_regions_to_remove < length(), "We should never remove all regions");

  if (num_regions_to_remove == 0) {
    return 0;
  }

  uint removed = 0;
  uint cur = _allocated_heapregions_length - 1;
  uint idx_last_found = 0;
  uint num_last_found = 0;

  while ((removed < num_regions_to_remove) &&
         (num_last_found = find_empty_from_idx_reverse(cur, &idx_last_found)) > 0) {
    uint to_remove = MIN2(num_regions_to_remove - removed, num_last_found);

    shrink_at(idx_last_found + num_last_found - to_remove, to_remove);

    cur = idx_last_found;
    removed += to_remove;
  }

  verify_optional();

  return removed;
}

void HeapRegionManager::shrink_at(uint index, size_t num_regions) {
#ifdef ASSERT
  for (uint i = index; i < (index + num_regions); i++) {
    assert(is_available(i), "Expected available region at index %u", i);
    assert(at(i)->is_empty(), "Expected empty region at index %u", i);
    assert(at(i)->is_free(), "Expected free region at index %u", i);
  }
#endif
  uncommit_regions(index, num_regions);
}

uint HeapRegionManager::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const {
  guarantee(start_idx < _allocated_heapregions_length, "checking");
  guarantee(res_idx != NULL, "checking");

  uint num_regions_found = 0;

  jlong cur = start_idx;
  while (cur != -1 && !(is_available(cur) && at(cur)->is_empty())) {
    cur--;
  }
  if (cur == -1) {
    return num_regions_found;
  }
  jlong old_cur = cur;
  // cur indexes the first empty region
  while (cur != -1 && is_available(cur) && at(cur)->is_empty()) {
    cur--;
  }
  *res_idx = cur + 1;
  num_regions_found = old_cur - cur;

#ifdef ASSERT
  for (uint i = *res_idx; i < (*res_idx + num_regions_found); i++) {
    assert(at(i)->is_empty(), "just checking");
  }
#endif
  return num_regions_found;
}

void HeapRegionManager::verify() {
  guarantee(length() <= _allocated_heapregions_length,
            "invariant: _length: %u _allocated_length: %u",
            length(), _allocated_heapregions_length);
  guarantee(_allocated_heapregions_length <= max_length(),
            "invariant: _allocated_length: %u _max_length: %u",
            _allocated_heapregions_length, max_length());

  bool prev_committed = true;
  uint num_committed = 0;
  HeapWord* prev_end = heap_bottom();
  for (uint i = 0; i < _allocated_heapregions_length; i++) {
    if (!is_available(i)) {
      prev_committed = false;
      continue;
    }
    num_committed++;
    HeapRegion* hr = _regions.get_by_index(i);
    guarantee(hr != NULL, "invariant: i: %u", i);
    guarantee(!prev_committed || hr->bottom() == prev_end,
              "invariant i: %u " HR_FORMAT " prev_end: " PTR_FORMAT,
              i, HR_FORMAT_PARAMS(hr), p2i(prev_end));
    guarantee(hr->hrm_index() == i,
              "invariant: i: %u hrm_index(): %u", i, hr->hrm_index());
    // Asserts will fire if i is >= _length
    HeapWord* addr = hr->bottom();
    guarantee(addr_to_region(addr) == hr, "sanity");
    // We cannot check whether the region is part of a particular set: at the time
    // this method may be called, we have only completed allocation of the regions,
    // but have not yet put them into a region set.
    prev_committed = true;
    prev_end = hr->end();
  }
  for (uint i = _allocated_heapregions_length; i < max_length(); i++) {
    guarantee(_regions.get_by_index(i) == NULL, "invariant i: %u", i);
  }

  guarantee(num_committed == _num_committed, "Found %u committed regions, but should be %u", num_committed, _num_committed);
  _free_list.verify();
}

#ifndef PRODUCT
void HeapRegionManager::verify_optional() {
  verify();
}
#endif // PRODUCT

HeapRegionClaimer::HeapRegionClaimer(uint n_workers) :
    _n_workers(n_workers), _n_regions(G1CollectedHeap::heap()->_hrm->_allocated_heapregions_length), _claims(NULL) {
  assert(n_workers > 0, "Need at least one worker.");
  uint* new_claims = NEW_C_HEAP_ARRAY(uint, _n_regions, mtGC);
  memset(new_claims, Unclaimed, sizeof(*_claims) * _n_regions);
  _claims = new_claims;
}

HeapRegionClaimer::~HeapRegionClaimer() {
  FREE_C_HEAP_ARRAY(uint, _claims);
}

uint HeapRegionClaimer::offset_for_worker(uint worker_id) const {
  assert(worker_id < _n_workers, "Invalid worker_id.");
  return _n_regions * worker_id / _n_workers;
}

bool HeapRegionClaimer::is_region_claimed(uint region_index) const {
  assert(region_index < _n_regions, "Invalid index.");
  return _claims[region_index] == Claimed;
}

bool HeapRegionClaimer::claim_region(uint region_index) {
  assert(region_index < _n_regions, "Invalid index.");
  uint old_val = Atomic::cmpxchg(&_claims[region_index], Unclaimed, Claimed);
  return old_val == Unclaimed;
}

class G1RebuildFreeListTask : public AbstractGangTask {
  HeapRegionManager* _hrm;
  FreeRegionList* _worker_freelists;
  uint _worker_chunk_size;
  uint _num_workers;

public:
  G1RebuildFreeListTask(HeapRegionManager* hrm, uint num_workers) :
      AbstractGangTask("G1 Rebuild Free List Task"),
      _hrm(hrm),
      _worker_freelists(NEW_C_HEAP_ARRAY(FreeRegionList, num_workers, mtGC)),
      _worker_chunk_size((_hrm->max_length() + num_workers - 1) / num_workers),
      _num_workers(num_workers) {
    for (uint worker = 0; worker < _num_workers; worker++) {
      ::new (&_worker_freelists[worker]) FreeRegionList("Appendable Worker Free List");
    }
  }

  ~G1RebuildFreeListTask() {
    for (uint worker = 0; worker < _num_workers; worker++) {
      _worker_freelists[worker].~FreeRegionList();
    }
    FREE_C_HEAP_ARRAY(FreeRegionList, _worker_freelists);
  }

  FreeRegionList* worker_freelist(uint worker) {
    return &_worker_freelists[worker];
  }

  // Each worker creates a free list for a chunk of the heap. The chunks won't
  // be overlapping so we don't need to do any claiming.
  void work(uint worker_id) {
    Ticks start_time = Ticks::now();
    EventGCPhaseParallel event;

    uint start = worker_id * _worker_chunk_size;
    uint end = MIN2(start + _worker_chunk_size, _hrm->max_length());

    // If start is outside the heap, this worker has nothing to do.
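    // (The chunk size is rounded up, so a high worker id can compute a
    // start past the last region even though num_workers <= max_length();
    // end was clamped to max_length() above.)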
    if (start > end) {
      return;
    }

    FreeRegionList* free_list = worker_freelist(worker_id);
    for (uint i = start; i < end; i++) {
      HeapRegion* region = _hrm->at_or_null(i);
      if (region != NULL && region->is_free()) {
        // Need to clear old links to allow the region to be added to a new free list.
        region->unlink_from_list();
        free_list->add_to_tail(region);
      }
    }

    event.commit(GCId::current(), worker_id, G1GCPhaseTimes::phase_name(G1GCPhaseTimes::RebuildFreeList));
    G1CollectedHeap::heap()->phase_times()->record_time_secs(G1GCPhaseTimes::RebuildFreeList, worker_id, (Ticks::now() - start_time).seconds());
  }
};

void HeapRegionManager::rebuild_free_list(WorkGang* workers) {
  // Abandon the current free list to allow a rebuild.
  _free_list.abandon();

  uint const num_workers = clamp(max_length(), 1u, workers->active_workers());
  G1RebuildFreeListTask task(this, num_workers);

  log_debug(gc, ergo)("Running %s using %u workers for rebuilding free list of regions",
                      task.name(), num_workers);
  workers->run_task(&task, num_workers);

  // Link the partial free lists together.
  Ticks serial_time = Ticks::now();
  for (uint worker = 0; worker < num_workers; worker++) {
    _free_list.append_ordered(task.worker_freelist(worker));
  }
  G1CollectedHeap::heap()->phase_times()->record_serial_rebuild_freelist_time_ms((Ticks::now() - serial_time).seconds() * 1000.0);
}