/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Arguments.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/heterogeneousHeapRegionManager.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.hpp"
#include "utilities/bitMap.inline.hpp"

class MasterFreeRegionListChecker : public HeapRegionSetChecker {
public:
  void check_mt_safety() {
    // Master Free List MT safety protocol:
    // (a) If we're at a safepoint, operations on the master free list
    // should be invoked by either the VM thread (which will serialize
    // them) or by the GC workers while holding the
    // FreeList_lock.
    // (b) If we're not at a safepoint, operations on the master free
    // list should be invoked while holding the Heap_lock.

    if (SafepointSynchronize::is_at_safepoint()) {
      guarantee(Thread::current()->is_VM_thread() ||
                FreeList_lock->owned_by_self(), "master free list MT safety protocol at a safepoint");
    } else {
      guarantee(Heap_lock->owned_by_self(), "master free list MT safety protocol outside a safepoint");
    }
  }
  bool is_correct_type(HeapRegion* hr) { return hr->is_free(); }
  const char* get_description() { return "Free Regions"; }
};

HeapRegionManager::HeapRegionManager() :
  _bot_mapper(NULL),
  _cardtable_mapper(NULL),
  _card_counts_mapper(NULL),
  _available_map(mtGC),
  _num_committed(0),
  _allocated_heapregions_length(0),
  _regions(), _heap_mapper(NULL),
  _prev_bitmap_mapper(NULL),
  _next_bitmap_mapper(NULL),
  _free_list("Free list", new MasterFreeRegionListChecker())
{ }

HeapRegionManager* HeapRegionManager::create_manager(G1CollectedHeap* heap) {
  if (G1Arguments::is_heterogeneous_heap()) {
    return new HeterogeneousHeapRegionManager((uint)(G1Arguments::heap_max_size_bytes() / HeapRegion::GrainBytes) /*heap size as num of regions*/);
  }
  return new HeapRegionManager();
}

void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,
                                   G1RegionToSpaceMapper* prev_bitmap,
                                   G1RegionToSpaceMapper* next_bitmap,
                                   G1RegionToSpaceMapper* bot,
                                   G1RegionToSpaceMapper* cardtable,
                                   G1RegionToSpaceMapper* card_counts) {
  _allocated_heapregions_length = 0;

  _heap_mapper = heap_storage;

  _prev_bitmap_mapper = prev_bitmap;
  _next_bitmap_mapper = next_bitmap;

  _bot_mapper = bot;
  _cardtable_mapper = cardtable;

  _card_counts_mapper = card_counts;

  MemRegion reserved = heap_storage->reserved();
  _regions.initialize(reserved.start(), reserved.end(), HeapRegion::GrainBytes);

  _available_map.initialize(_regions.length());
}

bool HeapRegionManager::is_available(uint region) const {
  return _available_map.at(region);
}

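// Allocate a free region of the given type. Young regions are taken from the
// tail of the free list, all other regions from the head. If NUMA is enabled,
// a region on the requested node is preferred, but allocation falls back to
// any node rather than failing.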
HeapRegion* HeapRegionManager::allocate_free_region(HeapRegionType type, uint requested_node_index) {
  HeapRegion* hr = NULL;
  bool from_head = !type.is_young();

  if (requested_node_index != G1NUMA::AnyNodeIndex && G1NUMA::numa()->is_enabled()) {
    // Try to allocate with requested node index.
    hr = _free_list.remove_region_with_node_index(from_head, requested_node_index, NULL);
  }

  if (hr == NULL) {
    // If there's a single active node or we did not get a region from our requested node,
    // try without requested node index.
    hr = _free_list.remove_region(from_head);
  }

  if (hr != NULL) {
    assert(hr->next() == NULL, "Single region should not have next");
    assert(is_available(hr->hrm_index()), "Must be committed");
  }

  return hr;
}

#ifdef ASSERT
bool HeapRegionManager::is_free(HeapRegion* hr) const {
  return _free_list.contains(hr);
}
#endif

HeapRegion* HeapRegionManager::new_heap_region(uint hrm_index) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  HeapWord* bottom = g1h->bottom_addr_for_region(hrm_index);
  MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
  assert(reserved().contains(mr), "invariant");
  return g1h->new_heap_region(hrm_index, mr);
}

void HeapRegionManager::commit_regions(uint index, size_t num_regions, WorkGang* pretouch_gang) {
  guarantee(num_regions > 0, "Must commit more than zero regions");
  guarantee(_num_committed + num_regions <= max_length(), "Cannot commit more than the maximum amount of regions");

  _num_committed += (uint)num_regions;

  _heap_mapper->commit_regions(index, num_regions, pretouch_gang);

  // Also commit auxiliary data
  _prev_bitmap_mapper->commit_regions(index, num_regions, pretouch_gang);
  _next_bitmap_mapper->commit_regions(index, num_regions, pretouch_gang);

  _bot_mapper->commit_regions(index, num_regions, pretouch_gang);
  _cardtable_mapper->commit_regions(index, num_regions, pretouch_gang);

  _card_counts_mapper->commit_regions(index, num_regions, pretouch_gang);
}

void HeapRegionManager::uncommit_regions(uint start, size_t num_regions) {
  guarantee(num_regions >= 1, "Need to specify at least one region to uncommit, tried to uncommit zero regions at %u", start);
  guarantee(_num_committed >= num_regions, "pre-condition");

  // Reset the node index so these regions can be distinguished from committed regions.
  for (uint i = start; i < start + num_regions; i++) {
    at(i)->set_node_index(G1NUMA::UnknownNodeIndex);
  }

  // Print before uncommitting.
  if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
    for (uint i = start; i < start + num_regions; i++) {
      HeapRegion* hr = at(i);
      G1CollectedHeap::heap()->hr_printer()->uncommit(hr);
    }
  }

  _num_committed -= (uint)num_regions;

  _available_map.par_clear_range(start, start + num_regions, BitMap::unknown_range);
  _heap_mapper->uncommit_regions(start, num_regions);

  // Also uncommit auxiliary data
  _prev_bitmap_mapper->uncommit_regions(start, num_regions);
  _next_bitmap_mapper->uncommit_regions(start, num_regions);

  _bot_mapper->uncommit_regions(start, num_regions);
  _cardtable_mapper->uncommit_regions(start, num_regions);

  _card_counts_mapper->uncommit_regions(start, num_regions);
}

void HeapRegionManager::make_regions_available(uint start, uint num_regions, WorkGang* pretouch_gang) {
  guarantee(num_regions > 0, "No point in calling this for zero regions");
  commit_regions(start, num_regions, pretouch_gang);
  for (uint i = start; i < start + num_regions; i++) {
    if (_regions.get_by_index(i) == NULL) {
      HeapRegion* new_hr = new_heap_region(i);
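      // Publish the new region only after it is fully constructed: the
      // storestore barrier orders the stores that built new_hr before the
      // store that makes it visible to concurrent readers of _regions.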
      OrderAccess::storestore();
      _regions.set_by_index(i, new_hr);
      _allocated_heapregions_length = MAX2(_allocated_heapregions_length, i + 1);
    }
  }

  _available_map.par_set_range(start, start + num_regions, BitMap::unknown_range);

  for (uint i = start; i < start + num_regions; i++) {
    assert(is_available(i), "Just made region %u available but is apparently not.", i);
    HeapRegion* hr = at(i);
    if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
      G1CollectedHeap::heap()->hr_printer()->commit(hr);
    }
    HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(i);
    MemRegion mr(bottom, bottom + HeapRegion::GrainWords);

    hr->initialize(mr);
    hr->set_node_index(G1NUMA::numa()->index_for_region(hr));
    insert_into_free_list(at(i));
  }
}

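// Report the memory usage of the auxiliary data structures (mark bitmaps,
// block offset table, card table and card counts). 'used' reports the
// committed portion of each mapper, while 'committed' and 'max' report the
// reserved size.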
MemoryUsage HeapRegionManager::get_auxiliary_data_memory_usage() const {
  size_t used_sz =
    _prev_bitmap_mapper->committed_size() +
    _next_bitmap_mapper->committed_size() +
    _bot_mapper->committed_size() +
    _cardtable_mapper->committed_size() +
    _card_counts_mapper->committed_size();

  size_t committed_sz =
    _prev_bitmap_mapper->reserved_size() +
    _next_bitmap_mapper->reserved_size() +
    _bot_mapper->reserved_size() +
    _cardtable_mapper->reserved_size() +
    _card_counts_mapper->reserved_size();

  return MemoryUsage(0, used_sz, committed_sz, committed_sz);
}

uint HeapRegionManager::expand_by(uint num_regions, WorkGang* pretouch_workers) {
  return expand_at(0, num_regions, pretouch_workers);
}

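// Try to expand the heap by committing num_regions regions, searching for
// runs of uncommitted regions starting at 'start'. Each run found is
// committed and made available until the request is satisfied or no
// uncommitted regions remain. Returns the number of regions actually added.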
uint HeapRegionManager::expand_at(uint start, uint num_regions, WorkGang* pretouch_workers) {
  if (num_regions == 0) {
    return 0;
  }

  uint cur = start;
  uint idx_last_found = 0;
  uint num_last_found = 0;

  uint expanded = 0;

  while (expanded < num_regions &&
         (num_last_found = find_unavailable_from_idx(cur, &idx_last_found)) > 0) {
    uint to_expand = MIN2(num_regions - expanded, num_last_found);
    make_regions_available(idx_last_found, to_expand, pretouch_workers);
    expanded += to_expand;
    cur = idx_last_found + num_last_found + 1;
  }

  verify_optional();
  return expanded;
}

uint HeapRegionManager::expand_on_preferred_node(uint preferred_index) {
  uint expand_candidate = UINT_MAX;
  for (uint i = 0; i < max_length(); i++) {
    if (is_available(i)) {
      // Already in use, continue.
      continue;
    }
    // Always save the candidate so we can expand later on.
    expand_candidate = i;
    if (is_on_preferred_index(expand_candidate, preferred_index)) {
      // We have found a candidate on the preferred node, break.
      break;
    }
  }

  if (expand_candidate == UINT_MAX) {
    // No regions left, expand failed.
    return 0;
  }

  make_regions_available(expand_candidate, 1, NULL);
  return 1;
}

bool HeapRegionManager::is_on_preferred_index(uint region_index, uint preferred_node_index) {
  uint region_node_index = G1NUMA::numa()->preferred_node_index_for_index(region_index);
  return region_node_index == preferred_node_index;
}

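// Find a contiguous run of 'num' regions that can be allocated into. With
// empty_only == true, only committed, empty regions qualify; otherwise
// uncommitted regions qualify as well. Returns the index of the first region
// of the run, or G1_NO_HRM_INDEX if no such run exists.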
uint HeapRegionManager::find_contiguous(size_t num, bool empty_only) {
  uint found = 0;
  size_t length_found = 0;
  uint cur = 0;

  while (length_found < num && cur < max_length()) {
    HeapRegion* hr = _regions.get_by_index(cur);
    if ((!empty_only && !is_available(cur)) || (is_available(cur) && hr != NULL && hr->is_empty())) {
      // This region is a potential candidate for allocation into.
      length_found++;
    } else {
      // This region is not a candidate. The next region is the next possible one.
      found = cur + 1;
      length_found = 0;
    }
    cur++;
  }

  if (length_found == num) {
    for (uint i = found; i < (found + num); i++) {
      HeapRegion* hr = _regions.get_by_index(i);
      // sanity check
      guarantee((!empty_only && !is_available(i)) || (is_available(i) && hr != NULL && hr->is_empty()),
                "Found region sequence starting at " UINT32_FORMAT ", length " SIZE_FORMAT
                " that is not empty at " UINT32_FORMAT ". Hr is " PTR_FORMAT, found, num, i, p2i(hr));
    }
    return found;
  } else {
    return G1_NO_HRM_INDEX;
  }
}

HeapRegion* HeapRegionManager::next_region_in_heap(const HeapRegion* r) const {
  guarantee(r != NULL, "Start region must be a valid region");
  guarantee(is_available(r->hrm_index()), "Trying to iterate starting from region %u which is not in the heap", r->hrm_index());
  for (uint i = r->hrm_index() + 1; i < _allocated_heapregions_length; i++) {
    HeapRegion* hr = _regions.get_by_index(i);
    if (is_available(i)) {
      return hr;
    }
  }
  return NULL;
}

void HeapRegionManager::iterate(HeapRegionClosure* blk) const {
  uint len = max_length();

  for (uint i = 0; i < len; i++) {
    if (!is_available(i)) {
      continue;
    }
    guarantee(at(i) != NULL, "Tried to access region %u that has a NULL HeapRegion*", i);
    bool res = blk->do_heap_region(at(i));
    if (res) {
      blk->set_incomplete();
      return;
    }
  }
}

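// Find the next run of unavailable (uncommitted) regions at or after
// start_idx. The start of the run is stored in *res_idx; the return value is
// the length of the run, or 0 if every region from start_idx onwards is
// available.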
uint HeapRegionManager::find_unavailable_from_idx(uint start_idx, uint* res_idx) const {
  guarantee(res_idx != NULL, "checking");
  guarantee(start_idx <= (max_length() + 1), "checking");

  uint num_regions = 0;

  uint cur = start_idx;
  while (cur < max_length() && is_available(cur)) {
    cur++;
  }
  if (cur == max_length()) {
    return num_regions;
  }
  *res_idx = cur;
  while (cur < max_length() && !is_available(cur)) {
    cur++;
  }
  num_regions = cur - *res_idx;
#ifdef ASSERT
  for (uint i = *res_idx; i < (*res_idx + num_regions); i++) {
    assert(!is_available(i), "just checking");
  }
  assert(cur == max_length() || num_regions == 0 || is_available(cur),
         "The region at the current position %u must be available or at the end of the heap.", cur);
#endif
  return num_regions;
}

uint HeapRegionManager::find_highest_free(bool* expanded) {
  // Loop downwards from the highest region index, looking for an
  // entry which is either free or not yet committed.  If not yet
  // committed, expand_at that index.
  uint curr = max_length() - 1;
  while (true) {
    HeapRegion *hr = _regions.get_by_index(curr);
    if (hr == NULL || !is_available(curr)) {
      uint res = expand_at(curr, 1, NULL);
      if (res == 1) {
        *expanded = true;
        return curr;
      }
    } else {
      if (hr->is_free()) {
        *expanded = false;
        return curr;
      }
    }
    if (curr == 0) {
      return G1_NO_HRM_INDEX;
    }
    curr--;
  }
}

bool HeapRegionManager::allocate_containing_regions(MemRegion range, size_t* commit_count, WorkGang* pretouch_workers) {
  size_t commits = 0;
  uint start_index = (uint)_regions.get_index_by_address(range.start());
  uint last_index = (uint)_regions.get_index_by_address(range.last());

  // Ensure that each G1 region in the range is free, returning false if not.
  // Commit those that are not yet available, and keep count.
  for (uint curr_index = start_index; curr_index <= last_index; curr_index++) {
    if (!is_available(curr_index)) {
      commits++;
      expand_at(curr_index, 1, pretouch_workers);
    }
    HeapRegion* curr_region = _regions.get_by_index(curr_index);
    if (!curr_region->is_free()) {
      return false;
    }
  }

  allocate_free_regions_starting_at(start_index, (last_index - start_index) + 1);
  *commit_count = commits;
  return true;
}

void HeapRegionManager::par_iterate(HeapRegionClosure* blk, HeapRegionClaimer* hrclaimer, const uint start_index) const {
  // Every worker will actually look at all regions, skipping over regions that
  // are currently not committed.
  // This also (potentially) iterates over regions newly allocated during GC. This
  // is no problem except for some extra work.
  const uint n_regions = hrclaimer->n_regions();
  for (uint count = 0; count < n_regions; count++) {
    const uint index = (start_index + count) % n_regions;
    assert(index < n_regions, "sanity");
    // Skip over unavailable regions
    if (!is_available(index)) {
      continue;
    }
    HeapRegion* r = _regions.get_by_index(index);
    // We'll ignore regions already claimed.
    // However, if the iteration is specified as concurrent, the values for
    // is_starts_humongous and is_continues_humongous cannot be trusted,
    // and we should just blindly iterate over regions regardless of their
    // humongous status.
    if (hrclaimer->is_region_claimed(index)) {
      continue;
    }
    // OK, try to claim it
    if (!hrclaimer->claim_region(index)) {
      continue;
    }
    bool res = blk->do_heap_region(r);
    if (res) {
      return;
    }
  }
}

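// Uncommit up to num_regions_to_remove empty, free regions, scanning
// downwards from the highest allocated region. Returns the number of regions
// actually removed, which may be smaller than requested if not enough empty
// regions are found.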
uint HeapRegionManager::shrink_by(uint num_regions_to_remove) {
  assert(length() > 0, "the region sequence should not be empty");
  assert(length() <= _allocated_heapregions_length, "invariant");
  assert(_allocated_heapregions_length > 0, "we should have at least one region committed");
  assert(num_regions_to_remove < length(), "We should never remove all regions");

  if (num_regions_to_remove == 0) {
    return 0;
  }

  uint removed = 0;
  uint cur = _allocated_heapregions_length - 1;
  uint idx_last_found = 0;
  uint num_last_found = 0;

  while ((removed < num_regions_to_remove) &&
      (num_last_found = find_empty_from_idx_reverse(cur, &idx_last_found)) > 0) {
    uint to_remove = MIN2(num_regions_to_remove - removed, num_last_found);

    shrink_at(idx_last_found + num_last_found - to_remove, to_remove);

    cur = idx_last_found;
    removed += to_remove;
  }

  verify_optional();

  return removed;
}

void HeapRegionManager::shrink_at(uint index, size_t num_regions) {
#ifdef ASSERT
  for (uint i = index; i < (index + num_regions); i++) {
    assert(is_available(i), "Expected available region at index %u", i);
    assert(at(i)->is_empty(), "Expected empty region at index %u", i);
    assert(at(i)->is_free(), "Expected free region at index %u", i);
  }
#endif
  uncommit_regions(index, num_regions);
}

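// Scan backwards from start_idx for a run of committed, empty regions. The
// index of the first (lowest) region of the run is stored in *res_idx; the
// return value is the length of the run, or 0 if none was found.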
uint HeapRegionManager::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const {
  guarantee(start_idx < _allocated_heapregions_length, "checking");
  guarantee(res_idx != NULL, "checking");

  uint num_regions_found = 0;

  jlong cur = start_idx;
  while (cur != -1 && !(is_available(cur) && at(cur)->is_empty())) {
    cur--;
  }
  if (cur == -1) {
    return num_regions_found;
  }
  jlong old_cur = cur;
  // cur indexes the first empty region
  while (cur != -1 && is_available(cur) && at(cur)->is_empty()) {
    cur--;
  }
  *res_idx = cur + 1;
  num_regions_found = old_cur - cur;

#ifdef ASSERT
  for (uint i = *res_idx; i < (*res_idx + num_regions_found); i++) {
    assert(at(i)->is_empty(), "just checking");
  }
#endif
  return num_regions_found;
}

void HeapRegionManager::verify() {
  guarantee(length() <= _allocated_heapregions_length,
            "invariant: _length: %u _allocated_length: %u",
            length(), _allocated_heapregions_length);
  guarantee(_allocated_heapregions_length <= max_length(),
            "invariant: _allocated_length: %u _max_length: %u",
            _allocated_heapregions_length, max_length());

  bool prev_committed = true;
  uint num_committed = 0;
  HeapWord* prev_end = heap_bottom();
  for (uint i = 0; i < _allocated_heapregions_length; i++) {
    if (!is_available(i)) {
      prev_committed = false;
      continue;
    }
    num_committed++;
    HeapRegion* hr = _regions.get_by_index(i);
    guarantee(hr != NULL, "invariant: i: %u", i);
    guarantee(!prev_committed || hr->bottom() == prev_end,
              "invariant i: %u " HR_FORMAT " prev_end: " PTR_FORMAT,
              i, HR_FORMAT_PARAMS(hr), p2i(prev_end));
    guarantee(hr->hrm_index() == i,
              "invariant: i: %u hrm_index(): %u", i, hr->hrm_index());
    // Asserts will fire if i is >= _length
    HeapWord* addr = hr->bottom();
    guarantee(addr_to_region(addr) == hr, "sanity");
    // We cannot check whether the region is part of a particular set: at the time
    // this method may be called, we have only completed allocation of the regions,
    // but not yet put them into a region set.
    prev_committed = true;
    prev_end = hr->end();
  }
  for (uint i = _allocated_heapregions_length; i < max_length(); i++) {
    guarantee(_regions.get_by_index(i) == NULL, "invariant i: %u", i);
  }

  guarantee(num_committed == _num_committed, "Found %u committed regions, but should be %u", num_committed, _num_committed);
  _free_list.verify();
}

#ifndef PRODUCT
void HeapRegionManager::verify_optional() {
  verify();
}
#endif // PRODUCT

HeapRegionClaimer::HeapRegionClaimer(uint n_workers) :
    _n_workers(n_workers), _n_regions(G1CollectedHeap::heap()->_hrm->_allocated_heapregions_length), _claims(NULL) {
  assert(n_workers > 0, "Need at least one worker.");
  uint* new_claims = NEW_C_HEAP_ARRAY(uint, _n_regions, mtGC);
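  // Note: initializing the claim array bytewise with memset relies on
  // Unclaimed being zero.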
  memset(new_claims, Unclaimed, sizeof(*_claims) * _n_regions);
  _claims = new_claims;
}

HeapRegionClaimer::~HeapRegionClaimer() {
  FREE_C_HEAP_ARRAY(uint, _claims);
}

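// Compute the starting region index for the given worker so that the regions
// are split into roughly equal, contiguous chunks across all workers.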
uint HeapRegionClaimer::offset_for_worker(uint worker_id) const {
  assert(worker_id < _n_workers, "Invalid worker_id.");
  return _n_regions * worker_id / _n_workers;
}

bool HeapRegionClaimer::is_region_claimed(uint region_index) const {
  assert(region_index < _n_regions, "Invalid index.");
  return _claims[region_index] == Claimed;
}

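// Atomically transition the claim for region_index from Unclaimed to Claimed.
// Returns true only for the single thread that wins the race.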
bool HeapRegionClaimer::claim_region(uint region_index) {
  assert(region_index < _n_regions, "Invalid index.");
  uint old_val = Atomic::cmpxchg(Claimed, &_claims[region_index], Unclaimed);
  return old_val == Unclaimed;
}