
src/hotspot/share/gc/g1/g1CollectedHeap.cpp

rev 51979 : imported patch 8071913-almost-done
rev 51980 : [mq]: 8071913-alternate
rev 51981 : [mq]: 6490394-uncommit-at-remark

@@ -1042,11 +1042,11 @@
 
   // Prepare heap for normal collections.
   assert(num_free_regions() == 0, "we should not have added any free regions");
   rebuild_region_sets(false /* free_list_only */);
   abort_refinement();
-  resize_if_necessary_after_full_collection();
+  resize_heap_if_necessary();
 
   // Rebuild the strong code root lists for each region
   rebuild_strong_code_roots();
 
   // Start a new incremental collection set for the next pause

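The rename in this hunk drops the "after full collection" qualifier: with the 6490394-uncommit-at-remark patch applied, the same resizing step is intended to run at the Remark pause as well, not only after a Full GC. The Remark-side call site is not among the hunks shown on this page; the compilable toy sketch below (hypothetical names and types, not HotSpot's) only illustrates the assumed ordering there.

    // Toy sketch, assumed ordering: regions found empty by marking are
    // reclaimed first, so a subsequent shrink can actually uncommit them.
    struct G1Heap {
      void reclaim_empty_regions() { /* free regions with no live data */ }
      void resize_heap_if_necessary() { /* expand/shrink per free ratios */ }
    };

    static void remark_pause(G1Heap& heap) {
      heap.reclaim_empty_regions();
      heap.resize_heap_if_necessary();
    }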
@@ -1145,11 +1145,11 @@
   // out by the GC locker). So, right now, we'll ignore the return value.
   bool dummy = do_full_collection(true,                /* explicit_gc */
                                   clear_all_soft_refs);
 }
 
-void G1CollectedHeap::resize_if_necessary_after_full_collection() {
+void G1CollectedHeap::resize_heap_if_necessary() {
   // Capacity, free and used after the GC are counted using full regions
   // to include the waste in the following calculations.
   const size_t capacity_after_gc = capacity();
   const size_t used_after_gc = capacity_after_gc - unused_committed_regions_in_bytes();
 

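Between this hunk and the next, the elided (unchanged) code derives the desired capacity bounds that the branches below compare against. A minimal standalone sketch of that arithmetic, assuming the conventional free-ratio formulation; the function name and parameters here are illustrative, not the file's actual locals, and the real code also clamps the result against the minimum and maximum heap sizes, which this sketch omits.

    #include <cstddef>

    // Capacity at which exactly free_pct percent of the heap would be free,
    // given the current occupancy.
    static size_t desired_capacity(size_t used_after_gc, unsigned free_pct) {
      const double used_fraction = 1.0 - (double)free_pct / 100.0;
      return (size_t)((double)used_after_gc / used_fraction);
    }

minimum_desired_capacity then corresponds to desired_capacity(used_after_gc, MinHeapFreeRatio), and maximum_desired_capacity to desired_capacity(used_after_gc, MaxHeapFreeRatio).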
@@ -1202,11 +1202,11 @@
 
   if (capacity_after_gc < minimum_desired_capacity) {
     // Don't expand unless it's significant
     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
 
-    log_debug(gc, ergo, heap)("Attempt heap expansion (capacity lower than min desired capacity after Full GC). "
+    log_debug(gc, ergo, heap)("Attempt heap expansion (capacity lower than min desired capacity). "
                               "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B live: " SIZE_FORMAT "B "
                               "min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
                               capacity_after_gc, used_after_gc, used(), minimum_desired_capacity, MinHeapFreeRatio);
 
     expand(expand_bytes, _workers);

@@ -1214,11 +1214,11 @@
     // No expansion, now see if we want to shrink
   } else if (capacity_after_gc > maximum_desired_capacity) {
     // Capacity too large, compute shrinking size
     size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
 
-    log_debug(gc, ergo, heap)("Attempt heap shrinking (capacity higher than max desired capacity after Full GC). "
+    log_debug(gc, ergo, heap)("Attempt heap shrinking (capacity higher than max desired capacity). "
                               "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B live: " SIZE_FORMAT "B "
                               "maximum_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
                               capacity_after_gc, used_after_gc, used(), maximum_desired_capacity, MaxHeapFreeRatio);
 
     shrink(shrink_bytes);

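Plugging HotSpot's generic defaults (MinHeapFreeRatio=40, MaxHeapFreeRatio=70) into the desired_capacity sketch above makes the two branches concrete; the values are purely illustrative.

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t MB = 1024 * 1024;
      const size_t used_after_gc = 600 * MB;
      // Defaults: MinHeapFreeRatio=40, MaxHeapFreeRatio=70.
      printf("min desired: %zu MB\n", desired_capacity(used_after_gc, 40) / MB); // 1000
      printf("max desired: %zu MB\n", desired_capacity(used_after_gc, 70) / MB); // 2000
      return 0;
    }

A 900 MB committed heap would therefore attempt to expand by 100 MB, a 2.5 GB committed heap would attempt to shrink by 500 MB, and anything in between is left alone.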
@@ -1390,12 +1390,12 @@
 }
 
 void G1CollectedHeap::shrink(size_t shrink_bytes) {
   _verifier->verify_region_sets_optional();
 
-  // We should only reach here at the end of a Full GC which means we
-  // should not not be holding to any GC alloc regions. The method
+  // We should only reach here at the end of a Full GC or during Remark, which
+  // means we should not be holding on to any GC alloc regions. The method
   // below will make sure of that and do any remaining clean up.
   _allocator->abandon_gc_alloc_regions();
 
   // Instead of tearing down / rebuilding the free lists here, we
   // could instead use the remove_all_pending() method on free_list to

@@ -4393,17 +4393,17 @@
       assert(_old_set->is_empty(), "pre-condition");
     }
   }
 
   bool do_heap_region(HeapRegion* r) {
-    // After full GC, no region should have a remembered set.
-    r->rem_set()->clear(true);
     if (r->is_empty()) {
+      assert(r->rem_set()->is_empty(), "Remembered sets should be empty.");
       // Add free regions to the free list
       r->set_free();
       _hrm->insert_into_free_list(r);
     } else if (!_free_list_only) {
+      assert(r->rem_set()->is_empty(), "Remembered sets should be empty.");
 
       if (r->is_archive() || r->is_humongous()) {
        // We ignore archive and humongous regions; these sets were left unchanged.
       } else {
         assert(r->is_young() || r->is_free() || r->is_old(), "invariant");

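This hunk inverts the remembered-set handling during the region-set rebuild: instead of clearing each remembered set here, the closure now asserts that clearing already happened earlier, before the rebuild runs. A compilable toy model of the resulting invariant, with hypothetical types rather than HotSpot's:

    #include <cassert>

    struct ToyRegion {
      bool empty;          // no live data
      bool rem_set_empty;  // remembered set already cleared
    };

    // Mirrors the closure's new shape: every region that is freed or
    // re-added to a set must arrive with an empty remembered set.
    static void rebuild_one(ToyRegion& r, bool free_list_only) {
      if (r.empty) {
        assert(r.rem_set_empty && "empty regions must have empty rem sets");
        // ... add to the free list ...
      } else if (!free_list_only) {
        assert(r.rem_set_empty && "rebuilt regions must have empty rem sets");
        // ... re-add to the appropriate (old/archive/humongous) set ...
      }
    }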
@@ -4437,14 +4437,13 @@
     set_used(cl.total_used());
     if (_archive_allocator != NULL) {
       _archive_allocator->clear_used();
     }
   }
-  assert(used_unlocked() == recalculate_used(),
-         "inconsistent used_unlocked(), "
-         "value: " SIZE_FORMAT " recalculated: " SIZE_FORMAT,
-         used_unlocked(), recalculate_used());
+  assert(used() == recalculate_used(),
+         "inconsistent used(), value: " SIZE_FORMAT " recalculated: " SIZE_FORMAT,
+         used(), recalculate_used());
 }
 
 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
   HeapRegion* hr = heap_region_containing(p);
   return hr->is_in(p);
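The final hunk tightens the consistency assert from used_unlocked() to used(). recalculate_used() walks every region and sums its usage, including bytes in the regions currently being allocated into; used() accounts for those as well, while used_unlocked() reads only the cached summary counter, so the new comparison is presumably the exact one now that the check can also run outside a Full GC. A toy model of the recomputation side (hypothetical types, not HotSpot's):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    struct ToyRegion { size_t used_bytes; };

    // recalculate_used()-style walk: sum per-region usage and require it to
    // match the cached counter exactly after a rebuild.
    static void check_used(const std::vector<ToyRegion>& regions, size_t cached_used) {
      size_t sum = 0;
      for (const ToyRegion& r : regions) sum += r.used_bytes;
      assert(sum == cached_used && "inconsistent used()");
    }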