/*
 * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Arguments.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1NUMAStats.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/heterogeneousHeapRegionManager.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "utilities/bitMap.inline.hpp"

class MasterFreeRegionListChecker : public HeapRegionSetChecker {
public:
  void check_mt_safety() {
    // Master Free List MT safety protocol:
    // (a) If we're at a safepoint, operations on the master free list
    // should be invoked by either the VM thread (which will serialize
    // them) or by the GC workers while holding the FreeList_lock.
    // (b) If we're not at a safepoint, operations on the master free
    // list should be invoked while holding the Heap_lock.

    if (SafepointSynchronize::is_at_safepoint()) {
      guarantee(Thread::current()->is_VM_thread() ||
                FreeList_lock->owned_by_self(), "master free list MT safety protocol at a safepoint");
    } else {
      guarantee(Heap_lock->owned_by_self(), "master free list MT safety protocol outside a safepoint");
    }
  }
  bool is_correct_type(HeapRegion* hr) { return hr->is_free(); }
  const char* get_description() { return "Free Regions"; }
};

HeapRegionRange::HeapRegionRange(uint start, uint end) : _start(start), _end(end) {
  assert(start <= end, "Invariant");
}

HeapRegionManager::HeapRegionManager() :
  _bot_mapper(NULL),
  _cardtable_mapper(NULL),
  _card_counts_mapper(NULL),
  _available_map(mtGC),
  _num_committed(0),
  _allocated_heapregions_length(0),
  _regions(), _heap_mapper(NULL),
  _prev_bitmap_mapper(NULL),
  _next_bitmap_mapper(NULL),
  _free_list("Free list", new MasterFreeRegionListChecker())
{ }

HeapRegionManager* HeapRegionManager::create_manager(G1CollectedHeap* heap) {
  if (G1Arguments::is_heterogeneous_heap()) {
    return new HeterogeneousHeapRegionManager((uint)(G1Arguments::heap_max_size_bytes() / HeapRegion::GrainBytes) /* heap size as num of regions */);
  }
  return new HeapRegionManager();
}

void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,
                                   G1RegionToSpaceMapper* prev_bitmap,
                                   G1RegionToSpaceMapper* next_bitmap,
                                   G1RegionToSpaceMapper* bot,
                                   G1RegionToSpaceMapper* cardtable,
                                   G1RegionToSpaceMapper* card_counts) {
  _allocated_heapregions_length = 0;

  _heap_mapper = heap_storage;

  _prev_bitmap_mapper = prev_bitmap;
  _next_bitmap_mapper = next_bitmap;

  _bot_mapper = bot;
  _cardtable_mapper = cardtable;

  _card_counts_mapper = card_counts;

  MemRegion reserved = heap_storage->reserved();
  _regions.initialize(reserved.start(), reserved.end(), HeapRegion::GrainBytes);

  _available_map.initialize(_regions.length());
}

bool HeapRegionManager::is_available(uint region) const {
  return _available_map.at(region);
}

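// Allocate a free region, preferring one on the requested NUMA node. Young
// regions are taken from the tail of the free list and all other region
// types from the head; a request for a specific node falls back to any node
// rather than failing.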
HeapRegion* HeapRegionManager::allocate_free_region(HeapRegionType type, uint requested_node_index) {
  HeapRegion* hr = NULL;
  bool from_head = !type.is_young();
  G1NUMA* numa = G1NUMA::numa();

  if (requested_node_index != G1NUMA::AnyNodeIndex && numa->is_enabled()) {
    // Try to allocate with requested node index.
    hr = _free_list.remove_region_with_node_index(from_head, requested_node_index);
  }

  if (hr == NULL) {
    // If there's a single active node or we did not get a region from our requested node,
    // try without requested node index.
    hr = _free_list.remove_region(from_head);
  }

  if (hr != NULL) {
    assert(hr->next() == NULL, "Single region should not have next");
    assert(is_available(hr->hrm_index()), "Must be committed");

    if (numa->is_enabled() && hr->node_index() < numa->num_active_nodes()) {
      numa->update_statistics(G1NUMAStats::NewRegionAlloc, requested_node_index, hr->node_index());
    }
  }

  return hr;
}

HeapRegion* HeapRegionManager::allocate_humongous_from_free_list(uint num_regions) {
  uint candidate = find_contiguous_in_free_list(num_regions);
  if (candidate == G1_NO_HRM_INDEX) {
    return NULL;
  }
  return allocate_free_regions_starting_at(candidate, num_regions);
}

HeapRegion* HeapRegionManager::allocate_humongous_allow_expand(uint num_regions) {
  uint candidate = find_contiguous_allow_expand(num_regions);
  if (candidate == G1_NO_HRM_INDEX) {
    return NULL;
  }
  expand_exact(candidate, num_regions, G1CollectedHeap::heap()->workers());
  return allocate_free_regions_starting_at(candidate, num_regions);
}

HeapRegion* HeapRegionManager::allocate_humongous(uint num_regions) {
  // Special case a single region to avoid expensive search.
  if (num_regions == 1) {
    return allocate_free_region(HeapRegionType::Humongous, G1NUMA::AnyNodeIndex);
  }
  return allocate_humongous_from_free_list(num_regions);
}

HeapRegion* HeapRegionManager::expand_and_allocate_humongous(uint num_regions) {
  return allocate_humongous_allow_expand(num_regions);
}

#ifdef ASSERT
bool HeapRegionManager::is_free(HeapRegion* hr) const {
  return _free_list.contains(hr);
}
#endif

HeapRegion* HeapRegionManager::new_heap_region(uint hrm_index) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  HeapWord* bottom = g1h->bottom_addr_for_region(hrm_index);
  MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
  assert(reserved().contains(mr), "invariant");
  return g1h->new_heap_region(hrm_index, mr);
}

void HeapRegionManager::commit_regions(uint index, size_t num_regions, WorkGang* pretouch_gang) {
  guarantee(num_regions > 0, "Must commit more than zero regions");
  guarantee(_num_committed + num_regions <= max_length(), "Cannot commit more than the maximum amount of regions");

  _num_committed += (uint)num_regions;

  _heap_mapper->commit_regions(index, num_regions, pretouch_gang);

  // Also commit auxiliary data.
  _prev_bitmap_mapper->commit_regions(index, num_regions, pretouch_gang);
  _next_bitmap_mapper->commit_regions(index, num_regions, pretouch_gang);

  _bot_mapper->commit_regions(index, num_regions, pretouch_gang);
  _cardtable_mapper->commit_regions(index, num_regions, pretouch_gang);

  _card_counts_mapper->commit_regions(index, num_regions, pretouch_gang);
}

void HeapRegionManager::uncommit_regions(uint start, size_t num_regions) {
  guarantee(num_regions >= 1, "Need to specify at least one region to uncommit, tried to uncommit zero regions at %u", start);
  guarantee(_num_committed >= num_regions, "pre-condition");

  // Reset the node index so uncommitted regions can be distinguished from
  // committed ones.
  for (uint i = start; i < start + num_regions; i++) {
    at(i)->set_node_index(G1NUMA::UnknownNodeIndex);
  }

  // Print before uncommitting.
  if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
    for (uint i = start; i < start + num_regions; i++) {
      HeapRegion* hr = at(i);
      G1CollectedHeap::heap()->hr_printer()->uncommit(hr);
    }
  }

  _num_committed -= (uint)num_regions;

  _available_map.par_clear_range(start, start + num_regions, BitMap::unknown_range);
  _heap_mapper->uncommit_regions(start, num_regions);

  // Also uncommit auxiliary data.
  _prev_bitmap_mapper->uncommit_regions(start, num_regions);
  _next_bitmap_mapper->uncommit_regions(start, num_regions);

  _bot_mapper->uncommit_regions(start, num_regions);
  _cardtable_mapper->uncommit_regions(start, num_regions);

  _card_counts_mapper->uncommit_regions(start, num_regions);
}

void HeapRegionManager::make_regions_available(uint start, uint num_regions, WorkGang* pretouch_gang) {
  guarantee(num_regions > 0, "No point in calling this for zero regions");
  commit_regions(start, num_regions, pretouch_gang);
  for (uint i = start; i < start + num_regions; i++) {
    if (_regions.get_by_index(i) == NULL) {
      HeapRegion* new_hr = new_heap_region(i);
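      // Publication barrier: make sure the new HeapRegion is fully
      // constructed before its pointer becomes visible to readers of
      // _regions.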
      OrderAccess::storestore();
      _regions.set_by_index(i, new_hr);
      _allocated_heapregions_length = MAX2(_allocated_heapregions_length, i + 1);
    }
  }

  _available_map.par_set_range(start, start + num_regions, BitMap::unknown_range);

  for (uint i = start; i < start + num_regions; i++) {
    assert(is_available(i), "Just made region %u available but is apparently not.", i);
    HeapRegion* hr = at(i);
    if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
      G1CollectedHeap::heap()->hr_printer()->commit(hr);
    }

    hr->initialize();
    hr->set_node_index(G1NUMA::numa()->index_for_region(hr));
    insert_into_free_list(at(i));
  }
}

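// Report memory usage for the auxiliary data structures. Note the mapping:
// "used" is the space the mappers have actually committed, while both
// "committed" and "max" report the full reserved size.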
MemoryUsage HeapRegionManager::get_auxiliary_data_memory_usage() const {
  size_t used_sz =
    _prev_bitmap_mapper->committed_size() +
    _next_bitmap_mapper->committed_size() +
    _bot_mapper->committed_size() +
    _cardtable_mapper->committed_size() +
    _card_counts_mapper->committed_size();

  size_t committed_sz =
    _prev_bitmap_mapper->reserved_size() +
    _next_bitmap_mapper->reserved_size() +
    _bot_mapper->reserved_size() +
    _cardtable_mapper->reserved_size() +
    _card_counts_mapper->reserved_size();

  return MemoryUsage(0, used_sz, committed_sz, committed_sz);
}

uint HeapRegionManager::expand_by(uint num_regions, WorkGang* pretouch_workers) {
  return expand_at(0, num_regions, pretouch_workers);
}

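// Try to make num_regions regions available, starting the search at start
// and expanding over possibly discontiguous ranges of unavailable regions.
// Returns the number of regions actually made available, which may be less
// than num_regions if not enough unavailable regions remain.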
uint HeapRegionManager::expand_at(uint start, uint num_regions, WorkGang* pretouch_workers) {
  if (num_regions == 0) {
    return 0;
  }

  uint offset = start;
  uint expanded = 0;

  do {
    HeapRegionRange regions = find_unavailable_from_idx(offset);
    if (regions.length() == 0) {
      // No more unavailable regions.
      break;
    }

    uint to_expand = MIN2(num_regions - expanded, regions.length());
    make_regions_available(regions.start(), to_expand, pretouch_workers);
    expanded += to_expand;
    offset = regions.end();
  } while (expanded < num_regions);

  verify_optional();
  return expanded;
}

void HeapRegionManager::expand_exact(uint start, uint num_regions, WorkGang* pretouch_workers) {
  assert(num_regions != 0, "Need to request at least one region");
  uint end = start + num_regions;

  for (uint i = start; i < end; i++) {
    if (!is_available(i)) {
      make_regions_available(i, 1, pretouch_workers);
    }
  }

  verify_optional();
}

uint HeapRegionManager::expand_on_preferred_node(uint preferred_index) {
  uint expand_candidate = UINT_MAX;
  for (uint i = 0; i < max_length(); i++) {
    if (is_available(i)) {
      // Already in use, continue.
      continue;
    }
    // Always save the candidate so we can expand later on.
    expand_candidate = i;
    if (is_on_preferred_index(expand_candidate, preferred_index)) {
      // We have found a candidate on the preferred node, break.
      break;
    }
  }

  if (expand_candidate == UINT_MAX) {
    // No regions left, expand failed.
    return 0;
  }

  expand_exact(expand_candidate, 1, NULL);
  return 1;
}

bool HeapRegionManager::is_on_preferred_index(uint region_index, uint preferred_node_index) {
  uint region_node_index = G1NUMA::numa()->preferred_node_index_for_index(region_index);
  return region_node_index == preferred_node_index;
}

#ifdef ASSERT
void HeapRegionManager::assert_contiguous_range(uint start, uint num_regions) {
  // General sanity check, regions found should either be available and empty
  // or not available so that we can make them available and use them.
  for (uint i = start; i < (start + num_regions); i++) {
    HeapRegion* hr = _regions.get_by_index(i);
    assert(!is_available(i) || hr->is_free(),
           "Found region sequence starting at " UINT32_FORMAT ", length " UINT32_FORMAT
           " that is not free at " UINT32_FORMAT ". Hr is " PTR_FORMAT ", type is %s",
           start, num_regions, i, p2i(hr), hr->get_type_str());
  }
}
#endif

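// Find the first sequence of num_regions contiguous regions in [start, end)
// that are all usable, i.e. either free or not yet committed. Each candidate
// is checked backwards from its last region; when an unusable region is hit
// at index i, every sequence containing i is ruled out, so the search can
// jump straight to the candidate starting at i + 1. For example, with
// num_regions = 3 and a committed non-free region at index 4, the candidate
// starting at 2 fails at index 4 and the next candidate tried starts at 5.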
uint HeapRegionManager::find_contiguous_in_range(uint start, uint end, uint num_regions) {
  assert(start <= end, "precondition");
  assert(num_regions >= 1, "precondition");
  uint candidate = start;       // First region in candidate sequence.
  uint unchecked = candidate;   // First unchecked region in candidate.
  // While the candidate sequence fits in the range...
  while (num_regions <= (end - candidate)) {
    // Walk backward over the regions for the current candidate.
    for (uint i = candidate + num_regions - 1; true; --i) {
      if (is_available(i) && !at(i)->is_free()) {
        // Region i can't be used, so restart with i+1 as the start
        // of a new candidate sequence, and with the region after the
        // old candidate sequence being the first unchecked region.
        unchecked = candidate + num_regions;
        candidate = i + 1;
        break;
      } else if (i == unchecked) {
        // All regions of candidate sequence have passed check.
        assert_contiguous_range(candidate, num_regions);
        return candidate;
      }
    }
  }
  return G1_NO_HRM_INDEX;
}

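// Restrict the search to committed regions: walk each run of set bits in
// _available_map and look for a contiguous candidate within that run.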
uint HeapRegionManager::find_contiguous_in_free_list(uint num_regions) {
  BitMap::idx_t range_start = 0;
  BitMap::idx_t range_end = range_start;
  uint candidate = G1_NO_HRM_INDEX;

  do {
    range_start = _available_map.get_next_one_offset(range_end);
    range_end = _available_map.get_next_zero_offset(range_start);
    candidate = find_contiguous_in_range((uint) range_start, (uint) range_end, num_regions);
  } while (candidate == G1_NO_HRM_INDEX && range_end < max_length());

  return candidate;
}

uint HeapRegionManager::find_contiguous_allow_expand(uint num_regions) {
  // Find any candidate.
  return find_contiguous_in_range(0, max_length(), num_regions);
}

HeapRegion* HeapRegionManager::next_region_in_heap(const HeapRegion* r) const {
  guarantee(r != NULL, "Start region must be a valid region");
  guarantee(is_available(r->hrm_index()), "Trying to iterate starting from region %u which is not in the heap", r->hrm_index());
  for (uint i = r->hrm_index() + 1; i < _allocated_heapregions_length; i++) {
    HeapRegion* hr = _regions.get_by_index(i);
    if (is_available(i)) {
      return hr;
    }
  }
  return NULL;
}

void HeapRegionManager::iterate(HeapRegionClosure* blk) const {
  uint len = max_length();

  for (uint i = 0; i < len; i++) {
    if (!is_available(i)) {
      continue;
    }
    guarantee(at(i) != NULL, "Tried to access region %u that has a NULL HeapRegion*", i);
    bool res = blk->do_heap_region(at(i));
    if (res) {
      blk->set_incomplete();
      return;
    }
  }
}

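// Find the next range of unavailable regions at or after index, returned as
// a half-open interval: its start is the first unavailable region and its
// end is the next available region. An empty range starting at max_length()
// means no unavailable regions remain.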
HeapRegionRange HeapRegionManager::find_unavailable_from_idx(uint index) const {
  guarantee(index <= max_length(), "checking");

  // Find first unavailable region from offset.
  BitMap::idx_t start = _available_map.get_next_zero_offset(index);
  if (start == _available_map.size()) {
    // No unavailable regions found.
    return HeapRegionRange(max_length(), max_length());
  }

  // The end of the range is the next available region.
  BitMap::idx_t end = _available_map.get_next_one_offset(start);

  assert(!_available_map.at(start), "Found region (" SIZE_FORMAT ") is not unavailable", start);
  assert(!_available_map.at(end - 1), "Last region (" SIZE_FORMAT ") in range is not unavailable", end - 1);
  assert(end == _available_map.size() || _available_map.at(end), "Region (" SIZE_FORMAT ") is not available", end);

  return HeapRegionRange((uint) start, (uint) end);
}

uint HeapRegionManager::find_highest_free(bool* expanded) {
  // Loop downwards from the highest region index, looking for an
  // entry which is either free or not yet committed. If not yet
  // committed, expand at that index.
  uint curr = max_length() - 1;
  while (true) {
    HeapRegion* hr = _regions.get_by_index(curr);
    if (hr == NULL || !is_available(curr)) {
      uint res = expand_at(curr, 1, NULL);
      if (res == 1) {
        *expanded = true;
        return curr;
      }
    } else {
      if (hr->is_free()) {
        *expanded = false;
        return curr;
      }
    }
    if (curr == 0) {
      return G1_NO_HRM_INDEX;
    }
    curr--;
  }
}

bool HeapRegionManager::allocate_containing_regions(MemRegion range, size_t* commit_count, WorkGang* pretouch_workers) {
  size_t commits = 0;
  uint start_index = (uint)_regions.get_index_by_address(range.start());
  uint last_index = (uint)_regions.get_index_by_address(range.last());

  // Ensure that each G1 region in the range is free, returning false if not.
  // Commit those that are not yet available, and keep count.
  for (uint curr_index = start_index; curr_index <= last_index; curr_index++) {
    if (!is_available(curr_index)) {
      commits++;
      expand_at(curr_index, 1, pretouch_workers);
    }
    HeapRegion* curr_region = _regions.get_by_index(curr_index);
    if (!curr_region->is_free()) {
      return false;
    }
  }

  allocate_free_regions_starting_at(start_index, (last_index - start_index) + 1);
  *commit_count = commits;
  return true;
}

void HeapRegionManager::par_iterate(HeapRegionClosure* blk, HeapRegionClaimer* hrclaimer, const uint start_index) const {
  // Every worker will actually look at all regions, skipping over regions that
  // are currently not committed.
  // This also (potentially) iterates over regions newly allocated during GC. This
  // is no problem except for some extra work.
  const uint n_regions = hrclaimer->n_regions();
  for (uint count = 0; count < n_regions; count++) {
    const uint index = (start_index + count) % n_regions;
    assert(index < n_regions, "sanity");
    // Skip over unavailable regions.
    if (!is_available(index)) {
      continue;
    }
    HeapRegion* r = _regions.get_by_index(index);
    // We'll ignore regions already claimed.
    // However, if the iteration is specified as concurrent, the values for
    // is_starts_humongous and is_continues_humongous cannot be trusted,
    // and we should just blindly iterate over regions regardless of their
    // humongous status.
    if (hrclaimer->is_region_claimed(index)) {
      continue;
    }
    // OK, try to claim it.
    if (!hrclaimer->claim_region(index)) {
      continue;
    }
    bool res = blk->do_heap_region(r);
    if (res) {
      return;
    }
  }
}

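// Uncommit up to num_regions_to_remove empty, free regions, searching from
// the highest allocated index downwards. Returns the number of regions
// actually removed, which may be smaller if not enough empty regions exist.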
uint HeapRegionManager::shrink_by(uint num_regions_to_remove) {
  assert(length() > 0, "the region sequence should not be empty");
  assert(length() <= _allocated_heapregions_length, "invariant");
  assert(_allocated_heapregions_length > 0, "we should have at least one region committed");
  assert(num_regions_to_remove < length(), "We should never remove all regions");

  if (num_regions_to_remove == 0) {
    return 0;
  }

  uint removed = 0;
  uint cur = _allocated_heapregions_length - 1;
  uint idx_last_found = 0;
  uint num_last_found = 0;

  while ((removed < num_regions_to_remove) &&
      (num_last_found = find_empty_from_idx_reverse(cur, &idx_last_found)) > 0) {
    uint to_remove = MIN2(num_regions_to_remove - removed, num_last_found);

    shrink_at(idx_last_found + num_last_found - to_remove, to_remove);

    cur = idx_last_found;
    removed += to_remove;
  }

  verify_optional();

  return removed;
}

void HeapRegionManager::shrink_at(uint index, size_t num_regions) {
#ifdef ASSERT
  for (uint i = index; i < (index + num_regions); i++) {
    assert(is_available(i), "Expected available region at index %u", i);
    assert(at(i)->is_empty(), "Expected empty region at index %u", i);
    assert(at(i)->is_free(), "Expected free region at index %u", i);
  }
#endif
  uncommit_regions(index, num_regions);
}

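// Scan backwards from start_idx for the highest-index run of contiguous
// empty, committed regions. The first loop skips down to the first empty
// region, the second walks over the whole empty run. Stores the lowest index
// of the run in *res_idx and returns the run's length (0 if no empty region
// was found).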
uint HeapRegionManager::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const {
  guarantee(start_idx < _allocated_heapregions_length, "checking");
  guarantee(res_idx != NULL, "checking");

  uint num_regions_found = 0;

  jlong cur = start_idx;
  while (cur != -1 && !(is_available(cur) && at(cur)->is_empty())) {
    cur--;
  }
  if (cur == -1) {
    return num_regions_found;
  }
  jlong old_cur = cur;
  // cur indexes the first empty region.
  while (cur != -1 && is_available(cur) && at(cur)->is_empty()) {
    cur--;
  }
  *res_idx = cur + 1;
  num_regions_found = old_cur - cur;

#ifdef ASSERT
  for (uint i = *res_idx; i < (*res_idx + num_regions_found); i++) {
    assert(at(i)->is_empty(), "just checking");
  }
#endif
  return num_regions_found;
}

void HeapRegionManager::verify() {
  guarantee(length() <= _allocated_heapregions_length,
            "invariant: _length: %u _allocated_length: %u",
            length(), _allocated_heapregions_length);
  guarantee(_allocated_heapregions_length <= max_length(),
            "invariant: _allocated_length: %u _max_length: %u",
            _allocated_heapregions_length, max_length());

  bool prev_committed = true;
  uint num_committed = 0;
  HeapWord* prev_end = heap_bottom();
  for (uint i = 0; i < _allocated_heapregions_length; i++) {
    if (!is_available(i)) {
      prev_committed = false;
      continue;
    }
    num_committed++;
    HeapRegion* hr = _regions.get_by_index(i);
    guarantee(hr != NULL, "invariant: i: %u", i);
    guarantee(!prev_committed || hr->bottom() == prev_end,
              "invariant i: %u " HR_FORMAT " prev_end: " PTR_FORMAT,
              i, HR_FORMAT_PARAMS(hr), p2i(prev_end));
    guarantee(hr->hrm_index() == i,
              "invariant: i: %u hrm_index(): %u", i, hr->hrm_index());
    // Asserts will fire if i is >= _length.
    HeapWord* addr = hr->bottom();
    guarantee(addr_to_region(addr) == hr, "sanity");
    // We cannot check whether the region is part of a particular set: at the
    // time this method may be called, we have only completed allocation of the
    // regions, but not yet put them into a region set.
    prev_committed = true;
    prev_end = hr->end();
  }
  for (uint i = _allocated_heapregions_length; i < max_length(); i++) {
    guarantee(_regions.get_by_index(i) == NULL, "invariant i: %u", i);
  }

  guarantee(num_committed == _num_committed, "Found %u committed regions, but should be %u", num_committed, _num_committed);
  _free_list.verify();
}

#ifndef PRODUCT
void HeapRegionManager::verify_optional() {
  verify();
}
#endif // PRODUCT

HeapRegionClaimer::HeapRegionClaimer(uint n_workers) :
    _n_workers(n_workers), _n_regions(G1CollectedHeap::heap()->_hrm->_allocated_heapregions_length), _claims(NULL) {
  assert(n_workers > 0, "Need at least one worker.");
  uint* new_claims = NEW_C_HEAP_ARRAY(uint, _n_regions, mtGC);
  memset(new_claims, Unclaimed, sizeof(*_claims) * _n_regions);
  _claims = new_claims;
}

HeapRegionClaimer::~HeapRegionClaimer() {
  FREE_C_HEAP_ARRAY(uint, _claims);
}

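// Spread the workers' starting offsets evenly over the region array, e.g.
// with 10 regions and 4 workers the offsets are 0, 2, 5 and 7.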
uint HeapRegionClaimer::offset_for_worker(uint worker_id) const {
  assert(worker_id < _n_workers, "Invalid worker_id.");
  return _n_regions * worker_id / _n_workers;
}

bool HeapRegionClaimer::is_region_claimed(uint region_index) const {
  assert(region_index < _n_regions, "Invalid index.");
  return _claims[region_index] == Claimed;
}

bool HeapRegionClaimer::claim_region(uint region_index) {
  assert(region_index < _n_regions, "Invalid index.");
  uint old_val = Atomic::cmpxchg(&_claims[region_index], Unclaimed, Claimed);
  return old_val == Unclaimed;
}

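// Rebuild the free list in parallel: each worker builds an ordered free list
// for its own disjoint chunk of the region array, and the per-worker lists
// are then appended to the master free list in worker order, keeping it
// sorted by region index.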
class G1RebuildFreeListTask : public AbstractGangTask {
  HeapRegionManager* _hrm;
  FreeRegionList*    _worker_freelists;
  uint               _worker_chunk_size;
  uint               _num_workers;

public:
  G1RebuildFreeListTask(HeapRegionManager* hrm, uint num_workers) :
      AbstractGangTask("G1 Rebuild Free List Task"),
      _hrm(hrm),
      _worker_freelists(NEW_C_HEAP_ARRAY(FreeRegionList, num_workers, mtGC)),
      _worker_chunk_size((_hrm->max_length() + num_workers - 1) / num_workers),
      _num_workers(num_workers) {
    for (uint worker = 0; worker < _num_workers; worker++) {
      ::new (&_worker_freelists[worker]) FreeRegionList("Appendable Worker Free List");
    }
  }

  ~G1RebuildFreeListTask() {
    for (uint worker = 0; worker < _num_workers; worker++) {
      _worker_freelists[worker].~FreeRegionList();
    }
    FREE_C_HEAP_ARRAY(FreeRegionList, _worker_freelists);
  }

  FreeRegionList* worker_freelist(uint worker) {
    return &_worker_freelists[worker];
  }

  // Each worker creates a free list for a chunk of the heap. The chunks won't
  // be overlapping so we don't need to do any claiming.
  void work(uint worker_id) {
    Ticks start_time = Ticks::now();
    EventGCPhaseParallel event;

    uint start = worker_id * _worker_chunk_size;
    uint end = MIN2(start + _worker_chunk_size, _hrm->max_length());

    // If start is outside the heap, this worker has nothing to do.
    if (start > end) {
      return;
    }

    FreeRegionList* free_list = worker_freelist(worker_id);
    for (uint i = start; i < end; i++) {
      HeapRegion* region = _hrm->at_or_null(i);
      if (region != NULL && region->is_free()) {
        // Clear the old links so the region can be added to the new free list.
        region->unlink_from_list();
        free_list->add_to_tail(region);
      }
    }

    event.commit(GCId::current(), worker_id, G1GCPhaseTimes::phase_name(G1GCPhaseTimes::RebuildFreeList));
    G1CollectedHeap::heap()->phase_times()->record_time_secs(G1GCPhaseTimes::RebuildFreeList, worker_id, (Ticks::now() - start_time).seconds());
  }
};

void HeapRegionManager::rebuild_free_list(WorkGang* workers) {
  // Abandon current free list to allow a rebuild.
  _free_list.abandon();

  uint const num_workers = clamp(max_length(), 1u, workers->active_workers());
  G1RebuildFreeListTask task(this, num_workers);

  log_debug(gc, ergo)("Running %s using %u workers for rebuilding free list of regions",
                      task.name(), num_workers);
  workers->run_task(&task, num_workers);

  // Link the partial free lists together.
  Ticks serial_time = Ticks::now();
  for (uint worker = 0; worker < num_workers; worker++) {
    _free_list.append_ordered(task.worker_freelist(worker));
  }
  G1CollectedHeap::heap()->phase_times()->record_serial_rebuild_freelist_time_ms((Ticks::now() - serial_time).seconds() * 1000.0);
}