src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

rev 6540 : 8054819: Rename HeapRegionSeq to HeapRegionManager
Reviewed-by: jwilhelm, jmasa
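
For orientation, a minimal sketch of the HeapRegionManager surface this file exercises. The member names are taken from the call sites in the hunks below; the grouping, signatures and return types are assumptions for illustration, not the actual heapRegionManager.hpp.

// Illustrative declaration only -- approximates the class formerly named
// HeapRegionSeq, as it is used from g1CollectedHeap.cpp.
class HeapRegion;
class FreeRegionList;
class HeapRegionClosure;
class MemRegion;
typedef unsigned int uint;   // HotSpot typedefs, repeated so the sketch stands alone
typedef int          jint;

class HeapRegionManager {
 public:
  // Free-list management.
  uint        num_free_regions() const;
  HeapRegion* allocate_free_region(bool is_old);
  void        insert_into_free_list(HeapRegion* hr);
  void        insert_list_into_free_list(FreeRegionList* list);
  void        remove_all_free_regions();

  // Contiguous-range search used by humongous allocation.
  uint find_contiguous_only_empty(uint num_regions);
  uint find_contiguous_empty_or_unavailable(uint num_regions);
  void allocate_free_regions_starting_at(uint first, uint num_regions);

  // Committing and uncommitting regions.
  uint expand_by(uint num_regions);     // returns the number of regions actually committed
  void expand_at(uint first, uint num_regions);
  uint shrink_by(uint num_regions);     // returns the number of regions actually removed

  // Geometry and queries.
  uint        length() const;           // committed regions
  uint        available() const;        // regions that can still be committed
  MemRegion   reserved() const;
  bool        is_available(uint region_index) const;
  bool        is_free(HeapRegion* hr) const;
  HeapRegion* get_dummy_region();
  HeapRegion* next_region_in_heap(const HeapRegion* from) const;

  // Iteration and verification.
  void iterate(HeapRegionClosure* cl) const;
  void par_iterate(HeapRegionClosure* cl, uint worker_id,
                   uint num_workers, jint claim_value) const;
  void verify();
  void verify_optional();
};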

@@ -526,13 +526,13 @@
       // It looks as if there are free regions available on the
       // secondary_free_list. Let's move them to the free_list and try
       // again to allocate from it.
       append_secondary_free_list();
 
-      assert(_hrs.num_free_regions() > 0, "if the secondary_free_list was not "
+      assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not "
              "empty we should have moved at least one entry to the free_list");
-      HeapRegion* res = _hrs.allocate_free_region(is_old);
+      HeapRegion* res = _hrm.allocate_free_region(is_old);
       if (G1ConcRegionFreeingVerbose) {
         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                                "allocated "HR_FORMAT" from secondary_free_list",
                                HR_FORMAT_PARAMS(res));
       }

@@ -569,11 +569,11 @@
         return res;
       }
     }
   }
 
-  res = _hrs.allocate_free_region(is_old);
+  res = _hrm.allocate_free_region(is_old);
 
   if (res == NULL) {
     if (G1ConcRegionFreeingVerbose) {
       gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                              "res == NULL, trying the secondary_free_list");

@@ -595,11 +595,11 @@
     if (expand(word_size * HeapWordSize)) {
       // Given that expand() succeeded in expanding the heap, and we
       // always expand the heap by an amount aligned to the heap
       // region size, the free list should in theory not be empty.
       // In either case allocate_free_region() will check for NULL.
-      res = _hrs.allocate_free_region(is_old);
+      res = _hrm.allocate_free_region(is_old);
     } else {
       _expand_heap_after_alloc_failure = false;
     }
   }
   return res;

@@ -607,11 +607,11 @@
 
 HeapWord*
 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
                                                            uint num_regions,
                                                            size_t word_size) {
-  assert(first != G1_NO_HRS_INDEX, "pre-condition");
+  assert(first != G1_NO_HRM_INDEX, "pre-condition");
   assert(isHumongous(word_size), "word_size should be humongous");
   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
 
   // Index of last region in the series + 1.
   uint last = first + num_regions;

@@ -745,20 +745,20 @@
 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 
   verify_region_sets_optional();
 
-  uint first = G1_NO_HRS_INDEX;
+  uint first = G1_NO_HRM_INDEX;
   uint obj_regions = (uint)(align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords);
 
   if (obj_regions == 1) {
     // Only one region to allocate, try to use a fast path by directly allocating
     // from the free lists. Do not try to expand here, we will potentially do that
     // later.
     HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
     if (hr != NULL) {
-      first = hr->hrs_index();
+      first = hr->hrm_index();
     }
   } else {
     // We can't allocate humongous regions spanning more than one region while
     // cleanupComplete() is running, since some of the regions we find to be
     // empty might not yet be added to the free list. It is not straightforward

@@ -770,48 +770,48 @@
     wait_while_free_regions_coming();
     append_secondary_free_list_if_not_empty_with_lock();
 
     // Policy: Try only empty regions (i.e. already committed first). Maybe we
     // are lucky enough to find some.
-    first = _hrs.find_contiguous_only_empty(obj_regions);
-    if (first != G1_NO_HRS_INDEX) {
-      _hrs.allocate_free_regions_starting_at(first, obj_regions);
+    first = _hrm.find_contiguous_only_empty(obj_regions);
+    if (first != G1_NO_HRM_INDEX) {
+      _hrm.allocate_free_regions_starting_at(first, obj_regions);
     }
   }
 
-  if (first == G1_NO_HRS_INDEX) {
+  if (first == G1_NO_HRM_INDEX) {
     // Policy: We could not find enough regions for the humongous object in the
     // free list. Look through the heap to find a mix of free and uncommitted regions.
     // If so, try expansion.
-    first = _hrs.find_contiguous_empty_or_unavailable(obj_regions);
-    if (first != G1_NO_HRS_INDEX) {
+    first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
+    if (first != G1_NO_HRM_INDEX) {
       // We found something. Make sure these regions are committed, i.e. expand
       // the heap. Alternatively we could do a defragmentation GC.
       ergo_verbose1(ErgoHeapSizing,
                     "attempt heap expansion",
                     ergo_format_reason("humongous allocation request failed")
                     ergo_format_byte("allocation request"),
                     word_size * HeapWordSize);
 
-      _hrs.expand_at(first, obj_regions);
+      _hrm.expand_at(first, obj_regions);
       g1_policy()->record_new_heap_size(num_regions());
 
 #ifdef ASSERT
       for (uint i = first; i < first + obj_regions; ++i) {
         HeapRegion* hr = region_at(i);
         assert(hr->is_empty(), "sanity");
         assert(is_on_master_free_list(hr), "sanity");
       }
 #endif
-      _hrs.allocate_free_regions_starting_at(first, obj_regions);
+      _hrm.allocate_free_regions_starting_at(first, obj_regions);
     } else {
       // Policy: Potentially trigger a defragmentation GC.
     }
   }
 
   HeapWord* result = NULL;
-  if (first != G1_NO_HRS_INDEX) {
+  if (first != G1_NO_HRM_INDEX) {
     result = humongous_obj_allocate_initialize_regions(first, obj_regions, word_size);
     assert(result != NULL, "it should always return a valid result");
 
     // A successful humongous object allocation changes the used space
     // information of the old generation so we need to recalculate the

@@ -1242,11 +1242,11 @@
 
   PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
     : _hr_printer(hr_printer) { }
 };
 
-void G1CollectedHeap::print_hrs_post_compaction() {
+void G1CollectedHeap::print_hrm_post_compaction() {
   PostCompactionPrinterClosure cl(hr_printer());
   heap_region_iterate(&cl);
 }
 
 bool G1CollectedHeap::do_collection(bool explicit_gc,

@@ -1411,11 +1411,11 @@
       if (_hr_printer.is_active()) {
         // We should do this after we potentially resize the heap so
         // that all the COMMIT / UNCOMMIT events are generated before
         // the end GC event.
 
-        print_hrs_post_compaction();
+        print_hrm_post_compaction();
         _hr_printer.end_gc(true /* full */, (size_t) total_collections());
       }
 
       G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
       if (hot_card_cache->use_cache()) {

@@ -1484,11 +1484,11 @@
              "young list should be empty at this point");
 
       // Update the number of full collections that have been completed.
       increment_old_marking_cycles_completed(false /* concurrent */);
 
-      _hrs.verify_optional();
+      _hrm.verify_optional();
       verify_region_sets_optional();
 
       verify_after_gc();
 
       // Clear the previous marking bitmap, if needed for bitmap verification.

@@ -1728,11 +1728,11 @@
                 "attempt heap expansion",
                 ergo_format_reason("allocation request failed")
                 ergo_format_byte("allocation request"),
                 word_size * HeapWordSize);
   if (expand(expand_bytes)) {
-    _hrs.verify_optional();
+    _hrm.verify_optional();
     verify_region_sets_optional();
     return attempt_allocation_at_safepoint(word_size,
                                  false /* expect_null_mutator_alloc_region */);
   }
   return NULL;

@@ -1756,11 +1756,11 @@
   }
 
   uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
   assert(regions_to_expand > 0, "Must expand by at least one region");
 
-  uint expanded_by = _hrs.expand_by(regions_to_expand);
+  uint expanded_by = _hrm.expand_by(regions_to_expand);
 
   if (expanded_by > 0) {
     size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
     g1_policy()->record_new_heap_size(num_regions());

@@ -1769,11 +1769,11 @@
                   "did not expand the heap",
                   ergo_format_reason("heap expansion operation failed"));
     // The expansion of the virtual storage space was unsuccessful.
     // Let's see if it was because we ran out of swap.
     if (G1ExitOnExpansionFailure &&
-        _hrs.available() >= regions_to_expand) {
+        _hrm.available() >= regions_to_expand) {
       // We had head room...
       vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
     }
   }
   return regions_to_expand > 0;
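
A worked example of the sizing arithmetic in the expand() hunk above, assuming a 1 MB region for concreteness (HeapRegion::GrainBytes is chosen ergonomically at startup, so the numbers are illustrative only):

// Sketch only: reproduces the rounding that expand() performs, with
// hypothetical inputs; not part of the patch.
#include <cstddef>
#include <cstdio>

static const size_t K = 1024;
static const size_t M = K * K;
static const size_t GrainBytes = 1 * M;   // assumed region size

static size_t align_up(size_t bytes, size_t alignment) {
  return (bytes + alignment - 1) / alignment * alignment;
}

int main() {
  size_t expand_request       = 2 * M + 512 * K;
  size_t aligned_expand_bytes = align_up(expand_request, GrainBytes);          // 3 MB
  unsigned regions_to_expand  = (unsigned)(aligned_expand_bytes / GrainBytes); // 3
  // Suppose _hrm.expand_by() manages to commit only two of them.
  unsigned expanded_by        = 2;
  size_t actual_expand_bytes  = expanded_by * GrainBytes;                      // 2 MB
  printf("request=%zu aligned=%zu regions=%u actual=%zu\n",
         expand_request, aligned_expand_bytes, regions_to_expand, actual_expand_bytes);
  return 0;   // actual_expand_bytes <= aligned_expand_bytes, as asserted above
}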

@@ -1784,11 +1784,11 @@
     ReservedSpace::page_align_size_down(shrink_bytes);
   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
                                          HeapRegion::GrainBytes);
   uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
 
-  uint num_regions_removed = _hrs.shrink_by(num_regions_to_remove);
+  uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
   size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
 
   ergo_verbose3(ErgoHeapSizing,
                 "shrink the heap",
                 ergo_format_byte("requested shrinking amount")

@@ -1817,11 +1817,11 @@
   // remove only the ones that we need to remove.
   tear_down_region_sets(true /* free_list_only */);
   shrink_helper(shrink_bytes);
   rebuild_region_sets(true /* free_list_only */);
 
-  _hrs.verify_optional();
+  _hrm.verify_optional();
   verify_region_sets_optional();
 }
 
 // Public methods.
 

@@ -2026,11 +2026,11 @@
                                          os::vm_page_size(),
                                          HeapRegion::GrainBytes,
                                          CMBitMap::mark_distance(),
                                          mtGC);
 
-  _hrs.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
+  _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
   g1_barrier_set()->initialize(cardtable_storage);
    // Do later initialization work for concurrent refinement.
   _cg1r->init(card_counts_storage);
 
   // 6843694 - ensure that the maximum region index can fit

@@ -2047,12 +2047,12 @@
 
   _bot_shared = new G1BlockOffsetSharedArray(_reserved, bot_storage);
 
   _g1h = this;
 
-  _in_cset_fast_test.initialize(_hrs.reserved().start(), _hrs.reserved().end(), HeapRegion::GrainBytes);
-  _humongous_is_live.initialize(_hrs.reserved().start(), _hrs.reserved().end(), HeapRegion::GrainBytes);
+  _in_cset_fast_test.initialize(_hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes);
+  _humongous_is_live.initialize(_hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes);
 
   // Create the ConcurrentMark data structure and thread.
   // (Must do this late, so that "max_regions" is defined.)
   _cm = new ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
   if (_cm == NULL || !_cm->completed_initialization()) {

@@ -2109,11 +2109,11 @@
   // counts and that mechanism.
   SpecializationStats::clear();
 
   // Here we allocate the dummy HeapRegion that is required by the
   // G1AllocRegion class.
-  HeapRegion* dummy_region = _hrs.get_dummy_region();
+  HeapRegion* dummy_region = _hrm.get_dummy_region();
 
   // We'll re-use the same region whether the alloc region will
   // require BOT updates or not and, if it doesn't, then a non-young
   // region will complain that it cannot support allocations without
   // BOT updates. So we'll tag the dummy region as young to avoid that.

@@ -2226,18 +2226,18 @@
                                 // is alive closure
                                 // (for efficiency/performance)
 }
 
 size_t G1CollectedHeap::capacity() const {
-  return _hrs.length() * HeapRegion::GrainBytes;
+  return _hrm.length() * HeapRegion::GrainBytes;
 }
 
 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
   assert(!hr->continuesHumongous(), "pre-condition");
   hr->reset_gc_time_stamp();
   if (hr->startsHumongous()) {
-    uint first_index = hr->hrs_index() + 1;
+    uint first_index = hr->hrm_index() + 1;
     uint last_index = hr->last_hc_index();
     for (uint i = first_index; i < last_index; i += 1) {
       HeapRegion* chr = region_at(i);
       assert(chr->continuesHumongous(), "sanity");
       chr->reset_gc_time_stamp();

@@ -2531,11 +2531,11 @@
     }
   } while (retry_gc);
 }
 
 bool G1CollectedHeap::is_in(const void* p) const {
-  if (_hrs.reserved().contains(p)) {
+  if (_hrm.reserved().contains(p)) {
     // Given that we know that p is in the reserved space,
     // heap_region_containing_raw() should successfully
     // return the containing region.
     HeapRegion* hr = heap_region_containing_raw(p);
     return hr->is_in(p);

@@ -2545,11 +2545,11 @@
 }
 
 #ifdef ASSERT
 bool G1CollectedHeap::is_in_exact(const void* p) const {
   bool contains = reserved_region().contains(p);
-  bool available = _hrs.is_available(addr_to_region((HeapWord*)p));
+  bool available = _hrm.is_available(addr_to_region((HeapWord*)p));
   if (contains && available) {
     return true;
   } else {
     return false;
   }

@@ -2612,19 +2612,19 @@
   SpaceClosureRegionClosure blk(cl);
   heap_region_iterate(&blk);
 }
 
 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
-  _hrs.iterate(cl);
+  _hrm.iterate(cl);
 }
 
 void
 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
                                                  uint worker_id,
                                                  uint num_workers,
                                                  jint claim_value) const {
-  _hrs.par_iterate(cl, worker_id, num_workers, claim_value);
+  _hrm.par_iterate(cl, worker_id, num_workers, claim_value);
 }
 
 class ResetClaimValuesClosure: public HeapRegionClosure {
 public:
   bool doHeapRegion(HeapRegion* r) {

@@ -2840,13 +2840,13 @@
     cur = next;
   }
 }
 
 HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
-  HeapRegion* result = _hrs.next_region_in_heap(from);
+  HeapRegion* result = _hrm.next_region_in_heap(from);
   while (result != NULL && result->isHumongous()) {
-    result = _hrs.next_region_in_heap(result);
+    result = _hrm.next_region_in_heap(result);
   }
   return result;
 }
 
 Space* G1CollectedHeap::space_containing(const void* addr) const {

@@ -2902,11 +2902,11 @@
     return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
   }
 }
 
 size_t G1CollectedHeap::max_capacity() const {
-  return _hrs.reserved().byte_size();
+  return _hrm.reserved().byte_size();
 }
 
 jlong G1CollectedHeap::millis_since_last_gc() {
   // assert(false, "NYI");
   return 0;

@@ -3431,13 +3431,13 @@
 void G1CollectedHeap::print_on(outputStream* st) const {
   st->print(" %-20s", "garbage-first heap");
   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
             capacity()/K, used_unlocked()/K);
   st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
-            _hrs.reserved().start(),
-            _hrs.reserved().start() + _hrs.length() + HeapRegion::GrainWords,
-            _hrs.reserved().end());
+            _hrm.reserved().start(),
+            _hrm.reserved().start() + _hrm.length() + HeapRegion::GrainWords,
+            _hrm.reserved().end());
   st->cr();
   st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
   uint young_regions = _young_list->length();
   st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
             (size_t) young_regions * HeapRegion::GrainBytes / K);

@@ -3676,11 +3676,11 @@
     if (!r->startsHumongous()) {
       return false;
     }
     G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
-    uint region_idx = r->hrs_index();
+    uint region_idx = r->hrm_index();
     bool is_candidate = !g1h->humongous_region_is_always_live(region_idx);
     // Is_candidate already filters out humongous regions with some remembered set.
     // This will not lead to humongous objects that we mistakenly keep alive because
     // during young collection the remembered sets will only be added to.
     if (is_candidate) {

@@ -4198,11 +4198,11 @@
     // It is not yet safe to tell the concurrent mark to
     // start as we have some optional output below. We don't want the
     // output from the concurrent mark thread interfering with this
     // logging output either.
 
-    _hrs.verify_optional();
+    _hrm.verify_optional();
     verify_region_sets_optional();
 
     TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
 

@@ -6017,11 +6017,11 @@
                                   FreeRegionList* free_list,
                                   bool par,
                                   bool locked) {
   assert(!hr->isHumongous(), "this is only for non-humongous regions");
   assert(!hr->is_empty(), "the region should not be empty");
-  assert(_hrs.is_available(hr->hrs_index()), "region should be committed");
+  assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
   assert(free_list != NULL, "pre-condition");
 
   if (G1VerifyBitmaps) {
     MemRegion mr(hr->bottom(), hr->end());
     concurrent_mark()->clearRangePrevBitmap(mr);

@@ -6048,11 +6048,11 @@
   // otherwise the information will be gone.
   uint last_index = hr->last_hc_index();
   hr->set_notHumongous();
   free_region(hr, free_list, par);
 
-  uint i = hr->hrs_index() + 1;
+  uint i = hr->hrm_index() + 1;
   while (i < last_index) {
     HeapRegion* curr_hr = region_at(i);
     assert(curr_hr->continuesHumongous(), "invariant");
     curr_hr->set_notHumongous();
     free_region(curr_hr, free_list, par);

@@ -6072,11 +6072,11 @@
 
 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
   assert(list != NULL, "list can't be null");
   if (!list->is_empty()) {
     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
-    _hrs.insert_list_into_free_list(list);
+    _hrm.insert_list_into_free_list(list);
   }
 }
 
 void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
   assert(_summary_bytes_used >= bytes,

@@ -6441,11 +6441,11 @@
     // - they would also pose considerable effort for cleaning up the remembered
     // sets.
     // While this cleanup is not strictly necessary to be done (or done instantly),
     // given that their occurrence is very low, this saves us this additional
     // complexity.
-    uint region_idx = r->hrs_index();
+    uint region_idx = r->hrm_index();
     if (g1h->humongous_is_live(region_idx) ||
         g1h->humongous_region_is_always_live(region_idx)) {
 
       if (G1TraceReclaimDeadHumongousObjectsAtYoungGC) {
         gclog_or_tty->print_cr("Live humongous %d region %d with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",

@@ -6680,26 +6680,26 @@
     // Note that emptying the _young_list is postponed and instead done as
     // the first step when rebuilding the region sets again. The reason for
     // this is that during a full GC string deduplication needs to know if
     // a collected region was young or old when the full GC was initiated.
   }
-  _hrs.remove_all_free_regions();
+  _hrm.remove_all_free_regions();
 }
 
 class RebuildRegionSetsClosure : public HeapRegionClosure {
 private:
   bool            _free_list_only;
   HeapRegionSet*   _old_set;
-  HeapRegionSeq*   _hrs;
+  HeapRegionManager*   _hrm;
   size_t          _total_used;
 
 public:
   RebuildRegionSetsClosure(bool free_list_only,
-                           HeapRegionSet* old_set, HeapRegionSeq* hrs) :
+                           HeapRegionSet* old_set, HeapRegionManager* hrm) :
     _free_list_only(free_list_only),
-    _old_set(old_set), _hrs(hrs), _total_used(0) {
-    assert(_hrs->num_free_regions() == 0, "pre-condition");
+    _old_set(old_set), _hrm(hrm), _total_used(0) {
+    assert(_hrm->num_free_regions() == 0, "pre-condition");
     if (!free_list_only) {
       assert(_old_set->is_empty(), "pre-condition");
     }
   }
 

@@ -6708,11 +6708,11 @@
       return false;
     }
 
     if (r->is_empty()) {
       // Add free regions to the free list
-      _hrs->insert_into_free_list(r);
+      _hrm->insert_into_free_list(r);
     } else if (!_free_list_only) {
       assert(!r->is_young(), "we should not come across young regions");
 
       if (r->isHumongous()) {
         // We ignore humongous regions, we left the humongous set unchanged

@@ -6736,11 +6736,11 @@
 
   if (!free_list_only) {
     _young_list->empty_list();
   }
 
-  RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrs);
+  RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
   heap_region_iterate(&cl);
 
   if (!free_list_only) {
     _summary_bytes_used = cl.total_used();
   }

@@ -6926,44 +6926,44 @@
 
 class VerifyRegionListsClosure : public HeapRegionClosure {
 private:
   HeapRegionSet*   _old_set;
   HeapRegionSet*   _humongous_set;
-  HeapRegionSeq*   _hrs;
+  HeapRegionManager*   _hrm;
 
 public:
   HeapRegionSetCount _old_count;
   HeapRegionSetCount _humongous_count;
   HeapRegionSetCount _free_count;
 
   VerifyRegionListsClosure(HeapRegionSet* old_set,
                            HeapRegionSet* humongous_set,
-                           HeapRegionSeq* hrs) :
-    _old_set(old_set), _humongous_set(humongous_set), _hrs(hrs),
+                           HeapRegionManager* hrm) :
+    _old_set(old_set), _humongous_set(humongous_set), _hrm(hrm),
     _old_count(), _humongous_count(), _free_count(){ }
 
   bool doHeapRegion(HeapRegion* hr) {
     if (hr->continuesHumongous()) {
       return false;
     }
 
     if (hr->is_young()) {
       // TODO
     } else if (hr->startsHumongous()) {
-      assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->hrs_index()));
+      assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->hrm_index()));
       _humongous_count.increment(1u, hr->capacity());
     } else if (hr->is_empty()) {
-      assert(_hrs->is_free(hr), err_msg("Heap region %u is empty but not on the free list.", hr->hrs_index()));
+      assert(_hrm->is_free(hr), err_msg("Heap region %u is empty but not on the free list.", hr->hrm_index()));
       _free_count.increment(1u, hr->capacity());
     } else {
-      assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrs_index()));
+      assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrm_index()));
       _old_count.increment(1u, hr->capacity());
     }
     return false;
   }
 
-  void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionSeq* free_list) {
+  void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
     guarantee(old_set->length() == _old_count.length(), err_msg("Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length()));
     guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), err_msg("Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
         old_set->total_capacity_bytes(), _old_count.capacity()));
 
     guarantee(humongous_set->length() == _humongous_count.length(), err_msg("Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count.length()));

@@ -6978,11 +6978,11 @@
 
 void G1CollectedHeap::verify_region_sets() {
   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 
   // First, check the explicit lists.
-  _hrs.verify();
+  _hrm.verify();
   {
     // Given that a concurrent operation might be adding regions to
     // the secondary free list we have to take the lock before
     // verifying it.
     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);

@@ -7009,13 +7009,13 @@
   append_secondary_free_list_if_not_empty_with_lock();
 
   // Finally, make sure that the region accounting in the lists is
   // consistent with what we see in the heap.
 
-  VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_hrs);
+  VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_hrm);
   heap_region_iterate(&cl);
-  cl.verify_counts(&_old_set, &_humongous_set, &_hrs);
+  cl.verify_counts(&_old_set, &_humongous_set, &_hrm);
 }
 
 // Optimized nmethod scanning
 
 class RegisterNMethodOopClosure: public OopClosure {