/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Arguments.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/heterogeneousHeapRegionManager.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.hpp"
#include "utilities/bitMap.inline.hpp"

class MasterFreeRegionListChecker : public HeapRegionSetChecker {
public:
  void check_mt_safety() {
    // Master Free List MT safety protocol:
    // (a) If we're at a safepoint, operations on the master free list
    // should be invoked by either the VM thread (which will serialize
    // them) or by the GC workers while holding the
    // FreeList_lock.
    // (b) If we're not at a safepoint, operations on the master free
    // list should be invoked while holding the Heap_lock.
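    //
    // For illustration only (hypothetical caller, not part of this file): a
    // caller outside a safepoint would typically satisfy case (b) like
    //   MutexLocker ml(Heap_lock);
    //   HeapRegion* hr = hrm->allocate_free_region(type, node_index);
    // so that the guarantee below holds while the free list is touched.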

    if (SafepointSynchronize::is_at_safepoint()) {
      guarantee(Thread::current()->is_VM_thread() ||
                FreeList_lock->owned_by_self(), "master free list MT safety protocol at a safepoint");
    } else {
      guarantee(Heap_lock->owned_by_self(), "master free list MT safety protocol outside a safepoint");
    }
  }
  bool is_correct_type(HeapRegion* hr) { return hr->is_free(); }
  const char* get_description() { return "Free Regions"; }
};

HeapRegionManager::HeapRegionManager() :
  _bot_mapper(NULL),
  _cardtable_mapper(NULL),
  _card_counts_mapper(NULL),
  _available_map(mtGC),
  _num_committed(0),
  _allocated_heapregions_length(0),
  _regions(), _heap_mapper(NULL),
  _prev_bitmap_mapper(NULL),
  _next_bitmap_mapper(NULL),
  _free_list("Free list", new MasterFreeRegionListChecker())
{ }

HeapRegionManager* HeapRegionManager::create_manager(G1CollectedHeap* heap) {
  if (G1Arguments::is_heterogeneous_heap()) {
    return new HeterogeneousHeapRegionManager((uint)(G1Arguments::heap_max_size_bytes() / HeapRegion::GrainBytes) /*heap size as num of regions*/);
  }
  return new HeapRegionManager();
}

void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,
                                   G1RegionToSpaceMapper* prev_bitmap,
                                   G1RegionToSpaceMapper* next_bitmap,
                                   G1RegionToSpaceMapper* bot,
                                   G1RegionToSpaceMapper* cardtable,
                                   G1RegionToSpaceMapper* card_counts) {
  _allocated_heapregions_length = 0;

  _heap_mapper = heap_storage;

  _prev_bitmap_mapper = prev_bitmap;
  _next_bitmap_mapper = next_bitmap;

  _bot_mapper = bot;
  _cardtable_mapper = cardtable;

  _card_counts_mapper = card_counts;

  MemRegion reserved = heap_storage->reserved();
  _regions.initialize(reserved.start(), reserved.end(), HeapRegion::GrainBytes);

  _available_map.initialize(_regions.length());
}

bool HeapRegionManager::is_available(uint region) const {
  return _available_map.at(region);
}

HeapRegion* HeapRegionManager::allocate_free_region(HeapRegionType type, uint requested_node_index) {
  G1MemoryNodeManager* mgr = G1MemoryNodeManager::mgr();
  HeapRegion* hr = NULL;
  bool from_head = !type.is_young();

  if (mgr->num_active_nodes() > 1) {
    uint valid_node_index = mgr->valid_node_index(requested_node_index);
    // Try to allocate with requested node index.
    hr = _free_list.remove_region_with_node_index(from_head, valid_node_index, NULL);
  }

  if (hr == NULL) {
    // If there's a single active node or we did not get a region from our requested node,
    // try without requested node index.
    hr = _free_list.remove_region(from_head);
  }

  if (hr != NULL) {
    assert(hr->next() == NULL, "Single region should not have next");
    assert(is_available(hr->hrm_index()), "Must be committed");
    if (G1VerifyNUMAIdOfHeapRegions) {
      // Read actual node index via system call.
      uint actual_node_index = mgr->index_of_address(hr->bottom());
      if (hr->node_index() != actual_node_index) {
        log_debug(gc, heap, numa)("Heap Region (%u) has different node index. "
                                  "actual index=%u, index=%u",
                                  hr->hrm_index(), actual_node_index, hr->node_index());
      }
    }
  }

  return hr;
}

#ifdef ASSERT
bool HeapRegionManager::is_free(HeapRegion* hr) const {
  return _free_list.contains(hr);
}
#endif

HeapRegion* HeapRegionManager::new_heap_region(uint hrm_index) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  HeapWord* bottom = g1h->bottom_addr_for_region(hrm_index);
  MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
  assert(reserved().contains(mr), "invariant");
  return g1h->new_heap_region(hrm_index, mr);
}

void HeapRegionManager::commit_regions(uint index, size_t num_regions, uint node_index, WorkGang* pretouch_gang) {
  guarantee(num_regions > 0, "Must commit more than zero regions");
  guarantee(_num_committed + num_regions <= max_length(), "Cannot commit more than the maximum amount of regions");

  _num_committed += (uint)num_regions;

  _heap_mapper->commit_regions(index, num_regions, node_index, pretouch_gang);

  // Also commit auxiliary data
  _prev_bitmap_mapper->commit_regions(index, num_regions, node_index, pretouch_gang);
  _next_bitmap_mapper->commit_regions(index, num_regions, node_index, pretouch_gang);

  _bot_mapper->commit_regions(index, num_regions, node_index, pretouch_gang);
  _cardtable_mapper->commit_regions(index, num_regions, node_index, pretouch_gang);

  _card_counts_mapper->commit_regions(index, num_regions, node_index, pretouch_gang);
}

void HeapRegionManager::uncommit_regions(uint start, size_t num_regions) {
  guarantee(num_regions >= 1, "Need to specify at least one region to uncommit, tried to uncommit zero regions at %u", start);
  guarantee(_num_committed >= num_regions, "pre-condition");

  // Reset node index to distinguish from committed regions.
  for (uint i = start; i < start + num_regions; i++) {
    at(i)->set_node_index(G1MemoryNodeManager::InvalidNodeIndex);
  }

  // Print before uncommitting.
  if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
    for (uint i = start; i < start + num_regions; i++) {
      HeapRegion* hr = at(i);
      G1CollectedHeap::heap()->hr_printer()->uncommit(hr);
    }
  }

  _num_committed -= (uint)num_regions;

  _available_map.par_clear_range(start, start + num_regions, BitMap::unknown_range);
  _heap_mapper->uncommit_regions(start, num_regions);

  // Also uncommit auxiliary data
  _prev_bitmap_mapper->uncommit_regions(start, num_regions);
  _next_bitmap_mapper->uncommit_regions(start, num_regions);

  _bot_mapper->uncommit_regions(start, num_regions);
  _cardtable_mapper->uncommit_regions(start, num_regions);

  _card_counts_mapper->uncommit_regions(start, num_regions);
}

static void print_numa_id_of_regions(uint start, uint num_regions) {
  LogTarget(Debug, gc, heap, numa) lt;

  if (lt.is_enabled()) {
    LogStream ls(lt);

    // Print header
    // Below logs are checked by TestG1NUMATouchRegions.java.
    ls.print_cr("Numa id of heap regions from %u to %u", start, start + num_regions - 1);
    ls.print_cr("Heap Region# : numa id of pages");

    for (uint i = start; i < start + num_regions; i++) {
      ls.print_cr("%6u : %u", i, G1CollectedHeap::heap()->region_at(i)->node_index());
    }
  }
}

void HeapRegionManager::make_regions_available(uint start, uint num_regions, uint node_index, WorkGang* pretouch_gang) {
  guarantee(num_regions > 0, "No point in calling this for zero regions");
  commit_regions(start, num_regions, node_index, pretouch_gang);
  for (uint i = start; i < start + num_regions; i++) {
    if (_regions.get_by_index(i) == NULL) {
      HeapRegion* new_hr = new_heap_region(i);
      OrderAccess::storestore();
      _regions.set_by_index(i, new_hr);
      _allocated_heapregions_length = MAX2(_allocated_heapregions_length, i + 1);
    }
  }

  _available_map.par_set_range(start, start + num_regions, BitMap::unknown_range);

  for (uint i = start; i < start + num_regions; i++) {
    assert(is_available(i), "Just made region %u available but is apparently not.", i);
    HeapRegion* hr = at(i);
    if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
      G1CollectedHeap::heap()->hr_printer()->commit(hr);
    }
    HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(i);
    MemRegion mr(bottom, bottom + HeapRegion::GrainWords);

    hr->initialize(mr);
    insert_into_free_list(at(i));
    // Set node index of the heap region after initialization.
    hr->set_node_index(G1MemoryNodeManager::mgr()->index_of_address(bottom));
  }

  if (G1PrintNUMAIdOfHeapRegions) {
    print_numa_id_of_regions(start, num_regions);
  }
}

MemoryUsage HeapRegionManager::get_auxiliary_data_memory_usage() const {
  size_t used_sz =
    _prev_bitmap_mapper->committed_size() +
    _next_bitmap_mapper->committed_size() +
    _bot_mapper->committed_size() +
    _cardtable_mapper->committed_size() +
    _card_counts_mapper->committed_size();

  size_t committed_sz =
    _prev_bitmap_mapper->reserved_size() +
    _next_bitmap_mapper->reserved_size() +
    _bot_mapper->reserved_size() +
    _cardtable_mapper->reserved_size() +
    _card_counts_mapper->reserved_size();

  return MemoryUsage(0, used_sz, committed_sz, committed_sz);
}

uint HeapRegionManager::expand_by(uint num_regions, uint node_index, WorkGang* pretouch_workers) {
  return expand_at(0, num_regions, node_index, pretouch_workers);
}

uint HeapRegionManager::expand_at(uint start, uint num_regions, uint node_index, WorkGang* pretouch_workers) {
  if (num_regions == 0) {
    return 0;
  }

  uint cur = start;
  uint idx_last_found = 0;
  uint num_last_found = 0;

  uint expanded = 0;

  while (expanded < num_regions &&
         (num_last_found = find_unavailable_from_idx(cur, &idx_last_found)) > 0) {
    uint to_expand = MIN2(num_regions - expanded, num_last_found);
    make_regions_available(idx_last_found, to_expand, node_index, pretouch_workers);
    expanded += to_expand;
    cur = idx_last_found + num_last_found + 1;
  }

  verify_optional();
  return expanded;
}

uint HeapRegionManager::find_contiguous(size_t num, bool empty_only) {
  uint found = 0;
  size_t length_found = 0;
  uint cur = 0;

  while (length_found < num && cur < max_length()) {
    HeapRegion* hr = _regions.get_by_index(cur);
    if ((!empty_only && !is_available(cur)) || (is_available(cur) && hr != NULL &&
                                                hr->is_empty())) {
      // This region is a potential candidate for allocation into.
      length_found++;
    } else {
      // This region is not a candidate. The next region is the next possible one.
      found = cur + 1;
      length_found = 0;
    }
    cur++;
  }

  if (length_found == num) {
    for (uint i = found; i < (found + num); i++) {
      HeapRegion* hr = _regions.get_by_index(i);
      // sanity check
      guarantee((!empty_only && !is_available(i)) || (is_available(i) && hr != NULL && hr->is_empty()),
                "Found region sequence starting at " UINT32_FORMAT ", length " SIZE_FORMAT
                " that is not empty at " UINT32_FORMAT ". Hr is " PTR_FORMAT, found, num, i, p2i(hr));
    }
    return found;
  } else {
    return G1_NO_HRM_INDEX;
  }
}

HeapRegion* HeapRegionManager::next_region_in_heap(const HeapRegion* r) const {
  guarantee(r != NULL, "Start region must be a valid region");
  guarantee(is_available(r->hrm_index()), "Trying to iterate starting from region %u which is not in the heap", r->hrm_index());
  for (uint i = r->hrm_index() + 1; i < _allocated_heapregions_length; i++) {
    HeapRegion* hr = _regions.get_by_index(i);
    if (is_available(i)) {
      return hr;
    }
  }
  return NULL;
}

void HeapRegionManager::iterate(HeapRegionClosure* blk) const {
  uint len = max_length();

  for (uint i = 0; i < len; i++) {
    if (!is_available(i)) {
      continue;
    }
    guarantee(at(i) != NULL, "Tried to access region %u that has a NULL HeapRegion*", i);
    bool res = blk->do_heap_region(at(i));
    if (res) {
      blk->set_incomplete();
      return;
    }
  }
}

uint HeapRegionManager::find_unavailable_from_idx(uint start_idx, uint* res_idx) const {
  guarantee(res_idx != NULL, "checking");
  guarantee(start_idx <= (max_length() + 1), "checking");

  uint num_regions = 0;

  uint cur = start_idx;
  while (cur < max_length() && is_available(cur)) {
    cur++;
  }
  if (cur == max_length()) {
    return num_regions;
  }
  *res_idx = cur;
  while (cur < max_length() && !is_available(cur)) {
    cur++;
  }
  num_regions = cur - *res_idx;
#ifdef ASSERT
  for (uint i = *res_idx; i < (*res_idx + num_regions); i++) {
    assert(!is_available(i), "just checking");
  }
  assert(cur == max_length() || num_regions == 0 || is_available(cur),
         "The region at the current position %u must be available or at the end of the heap.", cur);
#endif
  return num_regions;
}

uint HeapRegionManager::find_highest_free(bool* expanded) {
  // Loop downwards from the highest region index, looking for an
  // entry which is either free or not yet committed. If not yet
  // committed, expand_at that index.
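  //
  // For illustration (hypothetical numbers): with max_length() == 10 and
  // regions 7..9 not yet committed, the first iteration tries expand_at(9, 1);
  // if that commits a region we return 9 with *expanded == true. If region 9
  // were already committed and free, we would return 9 with *expanded == false.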
  uint curr = max_length() - 1;
  while (true) {
    HeapRegion* hr = _regions.get_by_index(curr);
    if (hr == NULL || !is_available(curr)) {
      uint res = expand_at(curr, 1, G1MemoryNodeManager::AnyNodeIndex, NULL);
      if (res == 1) {
        *expanded = true;
        return curr;
      }
    } else {
      if (hr->is_free()) {
        *expanded = false;
        return curr;
      }
    }
    if (curr == 0) {
      return G1_NO_HRM_INDEX;
    }
    curr--;
  }
}

bool HeapRegionManager::allocate_containing_regions(MemRegion range, size_t* commit_count, WorkGang* pretouch_workers) {
  size_t commits = 0;
  uint start_index = (uint)_regions.get_index_by_address(range.start());
  uint last_index = (uint)_regions.get_index_by_address(range.last());

  // Ensure that each G1 region in the range is free, returning false if not.
  // Commit those that are not yet available, and keep count.
  for (uint curr_index = start_index; curr_index <= last_index; curr_index++) {
    if (!is_available(curr_index)) {
      commits++;
      expand_at(curr_index, 1, G1MemoryNodeManager::AnyNodeIndex, pretouch_workers);
    }
    HeapRegion* curr_region = _regions.get_by_index(curr_index);
    if (!curr_region->is_free()) {
      return false;
    }
  }

  allocate_free_regions_starting_at(start_index, (last_index - start_index) + 1);
  *commit_count = commits;
  return true;
}

void HeapRegionManager::par_iterate(HeapRegionClosure* blk, HeapRegionClaimer* hrclaimer, const uint start_index) const {
  // Every worker will actually look at all regions, skipping over regions that
  // are currently not committed.
  // This also (potentially) iterates over regions newly allocated during GC. This
  // is no problem except for some extra work.
  const uint n_regions = hrclaimer->n_regions();
  for (uint count = 0; count < n_regions; count++) {
    const uint index = (start_index + count) % n_regions;
    assert(index < n_regions, "sanity");
    // Skip over unavailable regions
    if (!is_available(index)) {
      continue;
    }
    HeapRegion* r = _regions.get_by_index(index);
    // We'll ignore regions already claimed.
    // However, if the iteration is specified as concurrent, the values for
    // is_starts_humongous and is_continues_humongous can not be trusted,
    // and we should just blindly iterate over regions regardless of their
    // humongous status.
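    //
    // Note: is_region_claimed() below is an unsynchronized read used as a
    // cheap filter; claim_region() performs the atomic cmpxchg that actually
    // decides ownership, so a stale read here merely costs a failed CAS.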
    if (hrclaimer->is_region_claimed(index)) {
      continue;
    }
    // OK, try to claim it
    if (!hrclaimer->claim_region(index)) {
      continue;
    }
    bool res = blk->do_heap_region(r);
    if (res) {
      return;
    }
  }
}

uint HeapRegionManager::shrink_by(uint num_regions_to_remove) {
  assert(length() > 0, "the region sequence should not be empty");
  assert(length() <= _allocated_heapregions_length, "invariant");
  assert(_allocated_heapregions_length > 0, "we should have at least one region committed");
  assert(num_regions_to_remove < length(), "We should never remove all regions");

  if (num_regions_to_remove == 0) {
    return 0;
  }

  uint removed = 0;
  uint cur = _allocated_heapregions_length - 1;
  uint idx_last_found = 0;
  uint num_last_found = 0;

  while ((removed < num_regions_to_remove) &&
         (num_last_found = find_empty_from_idx_reverse(cur, &idx_last_found)) > 0) {
    uint to_remove = MIN2(num_regions_to_remove - removed, num_last_found);

    shrink_at(idx_last_found + num_last_found - to_remove, to_remove);

    cur = idx_last_found;
    removed += to_remove;
  }

  verify_optional();

  return removed;
}

void HeapRegionManager::shrink_at(uint index, size_t num_regions) {
#ifdef ASSERT
  for (uint i = index; i < (index + num_regions); i++) {
    assert(is_available(i), "Expected available region at index %u", i);
    assert(at(i)->is_empty(), "Expected empty region at index %u", i);
    assert(at(i)->is_free(), "Expected free region at index %u", i);
  }
#endif
  uncommit_regions(index, num_regions);
}

uint HeapRegionManager::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const {
  guarantee(start_idx < _allocated_heapregions_length, "checking");
  guarantee(res_idx != NULL, "checking");

  uint num_regions_found = 0;

  jlong cur = start_idx;
  while (cur != -1 && !(is_available(cur) && at(cur)->is_empty())) {
    cur--;
  }
  if (cur == -1) {
    return num_regions_found;
  }
  jlong old_cur = cur;
  // cur indexes the first empty region
  while (cur != -1 && is_available(cur) && at(cur)->is_empty()) {
    cur--;
  }
  *res_idx = cur + 1;
  num_regions_found = old_cur - cur;

#ifdef ASSERT
  for (uint i = *res_idx; i < (*res_idx + num_regions_found); i++) {
    assert(at(i)->is_empty(), "just checking");
  }
#endif
  return num_regions_found;
}

void HeapRegionManager::verify() {
  guarantee(length() <= _allocated_heapregions_length,
            "invariant: _length: %u _allocated_length: %u",
            length(), _allocated_heapregions_length);
  guarantee(_allocated_heapregions_length <= max_length(),
            "invariant: _allocated_length: %u _max_length: %u",
            _allocated_heapregions_length, max_length());

  bool prev_committed = true;
  uint num_committed = 0;
  HeapWord* prev_end = heap_bottom();
  for (uint i = 0; i < _allocated_heapregions_length; i++) {
    if (!is_available(i)) {
      prev_committed = false;
      continue;
    }
    num_committed++;
    HeapRegion* hr = _regions.get_by_index(i);
    guarantee(hr != NULL, "invariant: i: %u", i);
    guarantee(!prev_committed || hr->bottom() == prev_end,
              "invariant i: %u " HR_FORMAT " prev_end: " PTR_FORMAT,
              i, HR_FORMAT_PARAMS(hr), p2i(prev_end));
    guarantee(hr->hrm_index() == i,
              "invariant: i: %u hrm_index(): %u", i, hr->hrm_index());
    // Asserts will fire if i is >= _length
    HeapWord* addr = hr->bottom();
    guarantee(addr_to_region(addr) == hr, "sanity");
    // We cannot check whether the region is part of a particular set: at the time
    // this method may be called, we have only completed allocation of the regions,
    // but not yet put them into a region set.
    prev_committed = true;
    prev_end = hr->end();
  }
  for (uint i = _allocated_heapregions_length; i < max_length(); i++) {
    guarantee(_regions.get_by_index(i) == NULL, "invariant i: %u", i);
  }

  guarantee(num_committed == _num_committed, "Found %u committed regions, but should be %u", num_committed, _num_committed);
  _free_list.verify();
}

#ifndef PRODUCT
void HeapRegionManager::verify_optional() {
  verify();
}
#endif // PRODUCT

HeapRegionClaimer::HeapRegionClaimer(uint n_workers) :
    _n_workers(n_workers), _n_regions(G1CollectedHeap::heap()->_hrm->_allocated_heapregions_length), _claims(NULL) {
  assert(n_workers > 0, "Need at least one worker.");
  uint* new_claims = NEW_C_HEAP_ARRAY(uint, _n_regions, mtGC);
  memset(new_claims, Unclaimed, sizeof(*_claims) * _n_regions);
  _claims = new_claims;
}

HeapRegionClaimer::~HeapRegionClaimer() {
  FREE_C_HEAP_ARRAY(uint, _claims);
}

uint HeapRegionClaimer::offset_for_worker(uint worker_id) const {
  assert(worker_id < _n_workers, "Invalid worker_id.");
  return _n_regions * worker_id / _n_workers;
}

bool HeapRegionClaimer::is_region_claimed(uint region_index) const {
  assert(region_index < _n_regions, "Invalid index.");
  return _claims[region_index] == Claimed;
}

bool HeapRegionClaimer::claim_region(uint region_index) {
  assert(region_index < _n_regions, "Invalid index.");
  uint old_val = Atomic::cmpxchg(Claimed, &_claims[region_index], Unclaimed);
  return old_val == Unclaimed;
}
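
// For illustration (hypothetical numbers): offset_for_worker() spreads the
// workers' starting indices evenly over the claim array. With _n_regions == 100
// and _n_workers == 8 the start offsets are 0, 12, 25, 37, 50, 62, 75 and 87;
// each worker then walks all 100 slots from its own offset (mod 100) in
// par_iterate(), claiming regions via the cmpxchg above.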