
src/hotspot/share/gc/g1/g1CollectedHeap.cpp

rev 59704 : imported patch 8236073-softmaxheapsize

@@ -1033,11 +1033,12 @@
 
   // Prepare heap for normal collections.
   assert(num_free_regions() == 0, "we should not have added any free regions");
   rebuild_region_sets(false /* free_list_only */);
   abort_refinement();
-  resize_heap_if_necessary();
+
+  resize_heap_after_full_collection();
 
   // Rebuild the strong code root lists for each region
   rebuild_strong_code_roots();
 
   // Purge code root memory

@@ -1138,45 +1139,20 @@
   // out by the GC locker). So, right now, we'll ignore the return value.
   bool dummy = do_full_collection(true,                /* explicit_gc */
                                   clear_all_soft_refs);
 }
 
-void G1CollectedHeap::resize_heap_if_necessary() {
+void G1CollectedHeap::resize_heap_after_full_collection() {
   assert_at_safepoint_on_vm_thread();
 
   // Capacity, free and used after the GC counted as full regions to
   // include the waste in the following calculations.
   const size_t capacity_after_gc = capacity();
   const size_t used_after_gc = capacity_after_gc - unused_committed_regions_in_bytes();
 
-  // This is enforced in arguments.cpp.
-  assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
-         "otherwise the code below doesn't make sense");
-
-  // We don't have floating point command-line arguments
-  const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
-  const double maximum_used_percentage = 1.0 - minimum_free_percentage;
-  const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
-  const double minimum_used_percentage = 1.0 - maximum_free_percentage;
-
-  // We have to be careful here as these two calculations can overflow
-  // 32-bit size_t's.
-  double used_after_gc_d = (double) used_after_gc;
-  double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;
-  double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;
-
-  // Let's make sure that they are both under the max heap size, which
-  // by default will make them fit into a size_t.
-  double desired_capacity_upper_bound = (double) MaxHeapSize;
-  minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
-                                    desired_capacity_upper_bound);
-  maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
-                                    desired_capacity_upper_bound);
-
-  // We can now safely turn them into size_t's.
-  size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
-  size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;
+  size_t minimum_desired_capacity = _heap_sizing_policy->target_heap_capacity(used_after_gc, MinHeapFreeRatio);
+  size_t maximum_desired_capacity = _heap_sizing_policy->target_heap_capacity(used_after_gc, MaxHeapFreeRatio);
 
   // This assert only makes sense here, before we adjust them
   // with respect to the min and max heap size.
   assert(minimum_desired_capacity <= maximum_desired_capacity,
          "minimum_desired_capacity = " SIZE_FORMAT ", "

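For context, the following standalone sketch shows what the new G1HeapSizingPolicy::target_heap_capacity(used, free_ratio) call presumably computes, based on the inline arithmetic it replaces above: the capacity at which free_ratio percent of the heap would be free given the live data, capped at the maximum heap size. The _sketch names and the constants are illustrative stand-ins, not the actual HotSpot implementation.

#include <algorithm>
#include <cstdio>
#include <cstddef>

// Assumed stand-in for the -Xmx value (2 GB here); purely illustrative.
static const size_t kMaxHeapSize = 2048ULL * 1024 * 1024;

// Sketch of what target_heap_capacity(used, free_ratio) presumably computes,
// mirroring the removed inline arithmetic: the capacity at which 'free_ratio'
// percent of the heap would be free with 'used' bytes live, capped at -Xmx.
static size_t target_heap_capacity_sketch(size_t used, unsigned free_ratio) {
  const double free_percentage = (double)free_ratio / 100.0;
  const double used_percentage = 1.0 - free_percentage;
  // Compute in doubles to avoid overflowing a 32-bit size_t, as the removed
  // code did.
  double desired = (double)used / used_percentage;
  desired = std::min(desired, (double)kMaxHeapSize);
  return (size_t)desired;
}

int main() {
  const size_t M = 1024 * 1024;
  size_t used_after_gc = 800 * M;  // assumed live data after a full GC
  printf("minimum desired capacity (MinHeapFreeRatio=40): %zu MB\n",
         target_heap_capacity_sketch(used_after_gc, 40) / M);  // 1333 MB
  printf("maximum desired capacity (MaxHeapFreeRatio=70): %zu MB\n",
         target_heap_capacity_sketch(used_after_gc, 70) / M);  // capped at 2048 MB
  return 0;
}
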
@@ -1194,11 +1170,11 @@
 
   if (capacity_after_gc < minimum_desired_capacity) {
     // Don't expand unless it's significant
     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
 
-    log_debug(gc, ergo, heap)("Attempt heap expansion (capacity lower than min desired capacity). "
+    log_debug(gc, ergo, heap)("Heap resize. Attempt heap expansion (capacity lower than min desired capacity). "
                               "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B live: " SIZE_FORMAT "B "
                               "min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
                               capacity_after_gc, used_after_gc, used(), minimum_desired_capacity, MinHeapFreeRatio);
 
     expand(expand_bytes, _workers);

@@ -1206,11 +1182,11 @@
     // No expansion, now see if we want to shrink
   } else if (capacity_after_gc > maximum_desired_capacity) {
     // Capacity too large, compute shrinking size
     size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
 
-    log_debug(gc, ergo, heap)("Attempt heap shrinking (capacity higher than max desired capacity). "
+    log_debug(gc, ergo, heap)("Heap resize. Attempt heap shrinking (capacity higher than max desired capacity). "
                               "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B live: " SIZE_FORMAT "B "
                               "maximum_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
                               capacity_after_gc, used_after_gc, used(), maximum_desired_capacity, MaxHeapFreeRatio);
 
     shrink(shrink_bytes);

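To make the expand/shrink decision above concrete, here is a small worked example with made-up numbers (not taken from any real run):

#include <cstdio>
#include <cstddef>

int main() {
  // Made-up post-full-GC numbers, in MB, purely for illustration.
  const size_t capacity_after_gc        = 1024;  // currently committed
  const size_t minimum_desired_capacity = 1333;  // 800 MB live, 40% min free
  const size_t maximum_desired_capacity = 2048;  // 800 MB live, 70% max free, capped at -Xmx

  if (capacity_after_gc < minimum_desired_capacity) {
    // G1 would attempt to expand by the difference (here 309 MB); expand()
    // then rounds that request up to whole regions.
    printf("expand by %zu MB\n", minimum_desired_capacity - capacity_after_gc);
  } else if (capacity_after_gc > maximum_desired_capacity) {
    printf("shrink by %zu MB\n", capacity_after_gc - maximum_desired_capacity);
  } else {
    printf("capacity already within the desired range, no resize\n");
  }
  return 0;
}
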
@@ -1320,19 +1296,21 @@
   }
   return NULL;
 }
 
 bool G1CollectedHeap::expand(size_t expand_bytes, WorkGang* pretouch_workers, double* expand_time_ms) {
+  assert(expand_bytes > 0, "must be");
+
   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
   aligned_expand_bytes = align_up(aligned_expand_bytes,
                                        HeapRegion::GrainBytes);
 
-  log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: " SIZE_FORMAT "B expansion amount: " SIZE_FORMAT "B",
+  log_debug(gc, ergo, heap)("Heap resize. requested expansion amount: " SIZE_FORMAT "B aligned expansion amount: " SIZE_FORMAT "B",
                             expand_bytes, aligned_expand_bytes);
 
-  if (is_maximal_no_gc()) {
-    log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
+  if (capacity() == max_capacity()) {
+    log_debug(gc, ergo, heap)("Heap resize. Did not expand the heap (heap already fully expanded)");
     return false;
   }
 
   double expand_heap_start_time_sec = os::elapsedTime();
   uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);

@@ -1345,12 +1323,15 @@
 
   if (expanded_by > 0) {
     size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
     policy()->record_new_heap_size(num_regions());
+
+    log_debug(gc, ergo, heap)("Heap resize. Requested expansion amount: " SIZE_FORMAT "B actual expansion amount: " SIZE_FORMAT "B",
+                              aligned_expand_bytes, actual_expand_bytes);
   } else {
-    log_debug(gc, ergo, heap)("Did not expand the heap (heap expansion operation failed)");
+    log_debug(gc, ergo, heap)("Heap resize. Did not expand the heap (heap expansion operation failed)");
 
     // The expansion of the virtual storage space was unsuccessful.
     // Let's see if it was because we ran out of swap.
     if (G1ExitOnExpansionFailure &&
         _hrm->available() >= regions_to_expand) {

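The expansion request is rounded up first to the OS page size and then to whole heap regions before any regions are committed. A minimal sketch of that rounding, assuming a 4 KB page and a 1 MB region size (the real HeapRegion::GrainBytes is chosen ergonomically):

#include <cstdio>
#include <cstddef>

// Round 'value' up to a multiple of 'alignment' (alignment must be a power of two).
static size_t align_up_sketch(size_t value, size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t page_size   = 4 * 1024;          // assumed OS page size
  const size_t grain_bytes = 1 * 1024 * 1024;   // assumed region size

  size_t expand_bytes = 2500 * 1024;            // 2500 KB requested
  size_t aligned = align_up_sketch(expand_bytes, page_size);
  aligned = align_up_sketch(aligned, grain_bytes);

  // 2500 KB -> 2500 KB (already page aligned) -> 3 MB (whole regions).
  printf("requested %zu B, aligned %zu B, regions to expand %zu\n",
         expand_bytes, aligned, aligned / grain_bytes);
  return 0;
}
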
@@ -1364,50 +1345,70 @@
 bool G1CollectedHeap::expand_single_region(uint node_index) {
   uint expanded_by = _hrm->expand_on_preferred_node(node_index);
 
   if (expanded_by == 0) {
     assert(is_maximal_no_gc(), "Should be no regions left, available: %u", _hrm->available());
-    log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
+    log_debug(gc, ergo, heap)("Heap resize. Did not expand the heap (heap already fully expanded)");
     return false;
   }
 
   policy()->record_new_heap_size(num_regions());
   return true;
 }
 
 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
-  size_t aligned_shrink_bytes =
-    ReservedSpace::page_align_size_down(shrink_bytes);
-  aligned_shrink_bytes = align_down(aligned_shrink_bytes,
-                                         HeapRegion::GrainBytes);
+  assert(shrink_bytes > 0, "must be");
+  assert(is_aligned(shrink_bytes, HeapRegion::GrainBytes),
+         "Shrink request for " SIZE_FORMAT "B not aligned to heap region size " SIZE_FORMAT "B",
+         shrink_bytes, HeapRegion::GrainBytes);
+
   uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
 
   uint num_regions_removed = _hrm->shrink_by(num_regions_to_remove);
   size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
 
-  log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",
-                            shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
+  log_debug(gc, ergo, heap)("Heap resize. Requested shrinking amount: " SIZE_FORMAT "B actual shrinking amount: " SIZE_FORMAT "B",
+                            shrink_bytes, shrunk_bytes);
   if (num_regions_removed > 0) {
     policy()->record_new_heap_size(num_regions());
   } else {
-    log_debug(gc, ergo, heap)("Did not expand the heap (heap shrinking operation failed)");
+    log_debug(gc, ergo, heap)("Heap resize. Did not shrink the heap (heap shrinking operation failed)");
   }
 }
 
 void G1CollectedHeap::shrink(size_t shrink_bytes) {
-  _verifier->verify_region_sets_optional();
+  size_t aligned_shrink_bytes = ReservedSpace::page_align_size_down(shrink_bytes);
+  aligned_shrink_bytes = align_down(aligned_shrink_bytes,
+                                    HeapRegion::GrainBytes);
+
+  aligned_shrink_bytes = capacity() - MAX2(capacity() - aligned_shrink_bytes, min_capacity());
+  assert(is_aligned(aligned_shrink_bytes, HeapRegion::GrainBytes), "Bytes to shrink " SIZE_FORMAT "B not aligned", aligned_shrink_bytes);
 
+  log_debug(gc, ergo, heap)("Heap resize. Requested shrink amount: " SIZE_FORMAT "B expansion amount: " SIZE_FORMAT "B",
+                            shrink_bytes, aligned_shrink_bytes);
+
+  if (aligned_shrink_bytes == 0) {
+    log_debug(gc, ergo, heap)("Heap resize. Did not shrink the heap (shrink request too small)");
+    return;
+  }
+  if (capacity() == min_capacity()) {
+    log_debug(gc, ergo, heap)("Heap resize. Did not shrink the heap (heap already at minimum)");
+    return;
+  }
+  assert(aligned_shrink_bytes > 0, "capacity " SIZE_FORMAT " min_capacity " SIZE_FORMAT, capacity(), min_capacity());
+
+  _verifier->verify_region_sets_optional();
   // We should only reach here at the end of a Full GC or during Remark which
  // means we should not be holding on to any GC alloc regions. The method
   // below will make sure of that and do any remaining clean up.
   _allocator->abandon_gc_alloc_regions();
 
   // Instead of tearing down / rebuilding the free lists here, we
   // could instead use the remove_all_pending() method on free_list to
   // remove only the ones that we need to remove.
   tear_down_region_sets(true /* free_list_only */);
-  shrink_helper(shrink_bytes);
+  shrink_helper(aligned_shrink_bytes);
   rebuild_region_sets(true /* free_list_only */);
 
   _hrm->verify_optional();
   _verifier->verify_region_sets_optional();
 }

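The reworked shrink path aligns the request down to whole regions and then clamps it so the committed heap never drops below min_capacity(). A small standalone sketch of that clamping, with assumed region and heap sizes (the _sketch names are illustrative):

#include <algorithm>
#include <cstdio>
#include <cstddef>

// Round 'value' down to a multiple of 'alignment' (alignment must be a power of two).
static size_t align_down_sketch(size_t value, size_t alignment) {
  return value & ~(alignment - 1);
}

int main() {
  const size_t grain_bytes  = 1024 * 1024;        // assumed region size
  const size_t capacity     = 512 * grain_bytes;  // currently committed heap
  const size_t min_capacity = 128 * grain_bytes;  // assumed minimum heap size

  size_t shrink_request = 450 * grain_bytes + 123; // not region aligned
  size_t aligned = align_down_sketch(shrink_request, grain_bytes);

  // Clamp the shrink so the committed heap never drops below min_capacity,
  // mirroring "capacity() - MAX2(capacity() - aligned_shrink_bytes, min_capacity())".
  aligned = capacity - std::max(capacity - aligned, min_capacity);

  // 450 regions requested, but only 384 can go before hitting the minimum.
  printf("requested %zu regions, shrinking by %zu regions\n",
         shrink_request / grain_bytes, aligned / grain_bytes);
  return 0;
}
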
@@ -2422,14 +2423,22 @@
 
 size_t G1CollectedHeap::max_capacity() const {
   return _hrm->max_expandable_length() * HeapRegion::GrainBytes;
 }
 
+size_t G1CollectedHeap::min_capacity() const {
+  return MinHeapSize;
+}
+
 size_t G1CollectedHeap::max_reserved_capacity() const {
   return _hrm->max_length() * HeapRegion::GrainBytes;
 }
 
+size_t G1CollectedHeap::soft_max_capacity() const {
+  return clamp(align_up(SoftMaxHeapSize, HeapAlignment), MinHeapSize, max_capacity());
+}
+
 jlong G1CollectedHeap::millis_since_last_gc() {
   // See the notes in GenCollectedHeap::millis_since_last_gc()
   // for more information about the implementation.
   jlong ret_val = (os::javaTimeNanos() / NANOSECS_PER_MILLISEC) -
                   _policy->collection_pause_end_millis();

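The new soft_max_capacity() aligns the manageable SoftMaxHeapSize flag up to the heap alignment and clamps it between MinHeapSize and max_capacity(). A hedged sketch of that behavior with assumed flag values (all names and numbers below are stand-ins, not the real flags):

#include <algorithm>
#include <cstdio>
#include <cstddef>

// Round 'value' up to a multiple of 'alignment' (alignment must be a power of two).
static size_t align_up_sketch(size_t value, size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

// Sketch of soft_max_capacity(): the SoftMaxHeapSize value, aligned to the
// heap alignment and clamped to [MinHeapSize, max_capacity()].
static size_t soft_max_capacity_sketch(size_t soft_max, size_t heap_alignment,
                                       size_t min_heap, size_t max_cap) {
  size_t aligned = align_up_sketch(soft_max, heap_alignment);
  return std::min(std::max(aligned, min_heap), max_cap);
}

int main() {
  const size_t M = 1024 * 1024;
  // Assumed configuration: 128 MB minimum heap, 2048 MB maximum, 2 MB alignment.
  printf("SoftMaxHeapSize=1g  -> soft max %zu MB\n",
         soft_max_capacity_sketch(1024 * M, 2 * M, 128 * M, 2048 * M) / M);
  // A value below the minimum heap size is pulled up to that minimum.
  printf("SoftMaxHeapSize=64m -> soft max %zu MB\n",
         soft_max_capacity_sketch(64 * M, 2 * M, 128 * M, 2048 * M) / M);
  return 0;
}
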
@@ -2940,21 +2949,21 @@
   _verifier->verify_after_gc(type);
   _verifier->check_bitmaps("GC End");
   verify_numa_regions("GC End");
 }
 
-void G1CollectedHeap::expand_heap_after_young_collection(){
-  size_t expand_bytes = _heap_sizing_policy->expansion_amount();
-  if (expand_bytes > 0) {
-    // No need for an ergo logging here,
-    // expansion_amount() does this when it returns a value > 0.
-    double expand_ms;
-    if (!expand(expand_bytes, _workers, &expand_ms)) {
-      // We failed to expand the heap. Cannot do anything about it.
-    }
-    phase_times()->record_expand_heap_time(expand_ms);
+void G1CollectedHeap::resize_heap_after_young_gc() {
+  Ticks start = Ticks::now();
+
+  ssize_t resize_bytes = _heap_sizing_policy->resize_amount_after_young_gc();
+  if (resize_bytes > 0) {
+    expand(resize_bytes, _workers, NULL);
+  } else if (resize_bytes < 0) {
+    shrink(-resize_bytes);
   }
+
+  phase_times()->record_resize_heap_time((Ticks::now() - start).seconds() * 1000.0);
 }
 
 const char* G1CollectedHeap::young_gc_name() const {
   if (collector_state()->in_initial_mark_gc()) {
     return "Pause Young (Concurrent Start)";

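resize_heap_after_young_gc() now consumes a signed resize amount from the sizing policy instead of an expansion-only value. A minimal sketch of that control flow, with stand-in expand/shrink functions (the real code calls G1CollectedHeap::expand() and shrink()):

#include <cstdio>
#include <cstddef>

// Illustrative stand-ins for G1CollectedHeap::expand() and shrink().
static void expand_sketch(size_t bytes) { printf("expand by %zu B\n", bytes); }
static void shrink_sketch(size_t bytes) { printf("shrink by %zu B\n", bytes); }

// Sketch of the resize-after-young-GC flow: the sizing policy returns a signed
// byte count, where a positive value means grow the heap, a negative value
// means shrink it, and zero means leave it alone.
static void resize_after_young_gc_sketch(ptrdiff_t resize_bytes) {
  if (resize_bytes > 0) {
    expand_sketch((size_t)resize_bytes);
  } else if (resize_bytes < 0) {
    shrink_sketch((size_t)(-resize_bytes));
  }
}

int main() {
  resize_after_young_gc_sketch(32 * 1024 * 1024);   // policy wants a larger heap
  resize_after_young_gc_sketch(-16 * 1024 * 1024);  // policy wants a smaller heap
  resize_after_young_gc_sketch(0);                  // no change needed
  return 0;
}

Using a single signed quantity lets the young-GC path both grow and shrink the heap, which the previous expansion_amount()-based code could not do.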
@@ -3118,11 +3127,11 @@
 
         allocate_dummy_regions();
 
         _allocator->init_mutator_alloc_regions();
 
-        expand_heap_after_young_collection();
+        resize_heap_after_young_gc();
 
         double sample_end_time_sec = os::elapsedTime();
         double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
         policy()->record_collection_pause_end(pause_time_ms);
       }