--- old/src/hotspot/share/gc/g1/g1CollectedHeap.cpp 2020-02-11 11:34:41.698003183 +0100
+++ new/src/hotspot/share/gc/g1/g1CollectedHeap.cpp 2020-02-11 11:34:41.286990548 +0100
@@ -371,7 +371,7 @@
                                 word_size * HeapWordSize);
 
       _hrm->expand_at(first, obj_regions, workers());
-      policy()->record_new_heap_size(num_regions());
+      update_heap_target_size();
 
 #ifdef ASSERT
       for (uint i = first; i < first + obj_regions; ++i) {
@@ -1371,7 +1371,7 @@
 
   if (expanded_by > 0) {
     size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
-    policy()->record_new_heap_size(num_regions());
+    update_heap_target_size();
   } else {
     log_debug(gc, ergo, heap)("Did not expand the heap (heap expansion operation failed)");
@@ -1395,7 +1395,7 @@
     return false;
   }
 
-  policy()->record_new_heap_size(num_regions());
+  update_heap_target_size();
   return true;
 }
 
@@ -1412,7 +1412,7 @@
   log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",
                             shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
   if (num_regions_removed > 0) {
-    policy()->record_new_heap_size(num_regions());
+    update_heap_target_size();
   } else {
     log_debug(gc, ergo, heap)("Did not expand the heap (heap shrinking operation failed)");
   }
@@ -1437,6 +1437,12 @@
   _verifier->verify_region_sets_optional();
 }
 
+void G1CollectedHeap::update_heap_target_size() {
+  uint soft_goal_num_regions = (soft_max_capacity() + HeapRegion::GrainBytes - 1) / HeapRegion::GrainBytes;
+
+  _policy->update_heap_target_size(num_regions(), soft_goal_num_regions);
+}
+
 class OldRegionSetChecker : public HeapRegionSetChecker {
 public:
   void check_mt_safety() {
@@ -2420,6 +2426,10 @@
   return _hrm->max_length() * HeapRegion::GrainBytes;
 }
 
+size_t G1CollectedHeap::soft_max_capacity() const {
+  return clamp(align_up(SoftMaxHeapSize, HeapAlignment), MinHeapSize, max_capacity());
+}
+
 jlong G1CollectedHeap::millis_since_last_gc() {
   // See the notes in GenCollectedHeap::millis_since_last_gc()
   // for more information about the implementation.
@@ -3011,6 +3021,8 @@
   _verifier->verify_region_sets_optional();
   _verifier->verify_dirty_young_regions();
 
+  update_heap_target_size();
+
   // We should not be doing initial mark unless the conc mark thread is running
   if (!_cm_thread->should_terminate()) {
     // This call will decide whether this pause is an initial-mark
--- old/src/hotspot/share/gc/g1/g1CollectedHeap.hpp 2020-02-11 11:34:43.232050344 +0100
+++ new/src/hotspot/share/gc/g1/g1CollectedHeap.hpp 2020-02-11 11:34:42.824037800 +0100
@@ -543,6 +543,8 @@
 
   void verify_numa_regions(const char* desc);
 
+  void update_heap_target_size();
+
 public:
 
   G1YoungRemSetSamplingThread* sampling_thread() const { return _young_gen_sampling_thread; }
@@ -1287,6 +1289,9 @@
 
   // Return the size of reserved memory. Returns different value than max_capacity() when AllocateOldGenAt is used.
   virtual size_t max_reserved_capacity() const;
 
+  // Return the soft maximum heap capacity.
+  size_t soft_max_capacity() const;
+
   virtual jlong millis_since_last_gc();
 
--- old/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp 2020-02-11 11:34:44.738096643 +0100
+++ new/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp 2020-02-11 11:34:44.332084161 +0100
@@ -64,8 +64,8 @@
 
   // If the heap is at less than half its maximum size, scale the threshold down,
   // to a limit of 1. Thus the smaller the heap is, the more likely it is to expand,
   // though the scaling code will likely keep the increase small.
-  if (_g1h->capacity() <= _g1h->max_capacity() / 2) {
-    threshold *= (double)_g1h->capacity() / (double)(_g1h->max_capacity() / 2);
+  if (_g1h->capacity() <= _g1h->soft_max_capacity() / 2) {
+    threshold *= (double)_g1h->capacity() / (double)(_g1h->soft_max_capacity() / 2);
     threshold = MAX2(threshold, 1.0);
   }
--- old/src/hotspot/share/gc/g1/g1IHOPControl.cpp 2020-02-11 11:34:46.230142512 +0100
+++ new/src/hotspot/share/gc/g1/g1IHOPControl.cpp 2020-02-11 11:34:45.822129969 +0100
@@ -39,8 +39,8 @@
 }
 
 void G1IHOPControl::update_target_occupancy(size_t new_target_occupancy) {
-  log_debug(gc, ihop)("Target occupancy update: old: " SIZE_FORMAT "B, new: " SIZE_FORMAT "B",
-                      _target_occupancy, new_target_occupancy);
+  log_debug(gc, ihop)("Target occupancy update: old: " SIZE_FORMAT "B, new: " SIZE_FORMAT "B, soft " SIZE_FORMAT "B",
+                      _target_occupancy, new_target_occupancy, G1CollectedHeap::heap()->soft_max_capacity());
   _target_occupancy = new_target_occupancy;
 }
 
@@ -81,6 +81,11 @@
     _last_marking_length_s(0.0) {
 }
 
+size_t G1StaticIHOPControl::get_conc_mark_start_threshold() {
+  guarantee(_target_occupancy > 0, "Target occupancy must have been initialized.");
+  return (size_t) (_initial_ihop_percent * _target_occupancy / 100.0);
+}
+
 G1AdaptiveIHOPControl::G1AdaptiveIHOPControl(double ihop_percent,
                                              G1Predictions const* predictor,
                                              size_t heap_reserve_percent,
@@ -108,7 +113,7 @@
   double safe_total_heap_percentage = MIN2((double)(_heap_reserve_percent + _heap_waste_percent), 100.0);
 
   return (size_t)MIN2(
-    G1CollectedHeap::heap()->max_capacity() * (100.0 - safe_total_heap_percentage) / 100.0,
+    G1CollectedHeap::heap()->soft_max_capacity() * (100.0 - safe_total_heap_percentage) / 100.0,
     _target_occupancy * (100.0 - _heap_waste_percent) / 100.0
     );
 }
@@ -154,6 +159,8 @@
 
   _allocation_rate_s.add(allocation_rate);
   _last_unrestrained_young_size = additional_buffer_size;
+
+  log_error(gc)("update alloc info: time %1.2fms bytes " SIZE_FORMAT " additional buffer " SIZE_FORMAT, allocation_time_s * 1000.0, allocated_bytes, additional_buffer_size);
 }
 
 void G1AdaptiveIHOPControl::update_marking_length(double marking_length_s) {
--- old/src/hotspot/share/gc/g1/g1IHOPControl.hpp 2020-02-11 11:34:47.739188904 +0100
+++ new/src/hotspot/share/gc/g1/g1IHOPControl.hpp 2020-02-11 11:34:47.328176268 +0100
@@ -90,10 +90,7 @@
 public:
   G1StaticIHOPControl(double ihop_percent);
 
-  size_t get_conc_mark_start_threshold() {
-    guarantee(_target_occupancy > 0, "Target occupancy must have been initialized.");
-    return (size_t) (_initial_ihop_percent * _target_occupancy / 100.0);
-  }
+  size_t get_conc_mark_start_threshold();
 
   virtual void update_marking_length(double marking_length_s) {
     assert(marking_length_s > 0.0, "Marking length must be larger than zero but is %.3f", marking_length_s);
--- old/src/hotspot/share/gc/g1/g1Policy.cpp 2020-02-11 11:34:49.216234312 +0100
+++ new/src/hotspot/share/gc/g1/g1Policy.cpp 2020-02-11 11:34:48.810221830 +0100
@@ -179,18 +179,38 @@
   }
 };
 
-void G1Policy::record_new_heap_size(uint new_number_of_regions) {
-  // re-calculate the necessary reserve
-  double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
-  // We use ceiling so that if reserve_regions_d is > 0.0 (but
-  // smaller than 1.0) we'll get 1.
-  _reserve_regions = (uint) ceil(reserve_regions_d);
-
+void G1Policy::record_new_target_heap_size(uint new_number_of_regions, uint reserve_regions) {
+  _reserve_regions = reserve_regions;
   _young_gen_sizer->heap_size_changed(new_number_of_regions);
-
   _ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes);
 }
 
+void G1Policy::update_heap_target_size(uint num_regions, uint soft_goal_num_regions) {
+  uint desired_number_of_regions = MIN2(num_regions, soft_goal_num_regions);
+
+  uint regular_reserve_regions = (uint)ceil((double)num_regions * G1ReservePercent / 100);
+  uint reserve_regions = regular_reserve_regions;
+  uint soft_reserve_regions = 0;
+
+  if (soft_goal_num_regions < num_regions) {
+    soft_reserve_regions = num_regions - soft_goal_num_regions +
+                           (uint)ceil((double)soft_goal_num_regions * G1ReservePercent / 100);
+    reserve_regions = soft_reserve_regions;
+  }
+
+  // FIXME: this method is called very early during startup, during initial heap expansion,
+  // so the _g1h member has not been set yet.
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  uint MBPerRegion = HeapRegion::GrainBytes / (1024 * 1024);
+  log_error(gc)("heap target update: soft-goal %u committed %u goal %u hard-reserve %u soft-reserve %u reserve %u free %u used %u",
+                soft_goal_num_regions * MBPerRegion, num_regions * MBPerRegion,
+                desired_number_of_regions * MBPerRegion,
+                regular_reserve_regions * MBPerRegion, soft_reserve_regions * MBPerRegion, reserve_regions * MBPerRegion,
+                g1h->num_free_regions() * MBPerRegion, g1h->num_used_regions() * MBPerRegion);
+
+  record_new_target_heap_size(desired_number_of_regions, reserve_regions);
+}
+
 uint G1Policy::calculate_young_list_desired_min_length(uint base_min_length) const {
   uint desired_min_length = 0;
   if (use_adaptive_young_list_length()) {
--- old/src/hotspot/share/gc/g1/g1Policy.hpp 2020-02-11 11:34:50.771282118 +0100
+++ new/src/hotspot/share/gc/g1/g1Policy.hpp 2020-02-11 11:34:50.316268129 +0100
@@ -289,6 +289,7 @@
 
   void record_concurrent_refinement_data(bool is_full_collection);
 
+  void record_new_target_heap_size(uint new_number_of_regions, uint reserve_regions);
 public:
 
   G1Policy(STWGCTimer* gc_timer);
@@ -306,8 +307,9 @@
   // higher, recalculate the young list target length prediction.
   void revise_young_list_target_length_if_necessary(size_t rs_length);
 
-  // This should be called after the heap is resized.
-  void record_new_heap_size(uint new_number_of_regions);
+  // Updates heap size targets, taking current heap dimensions into account. This should
+  // be called after changes to the sizes of the heap.
+  void update_heap_target_size(uint num_regions, uint soft_goal_num_regions);
 
   virtual void init(G1CollectedHeap* g1h, G1CollectionSet* collection_set);
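
Note (not part of the patch): the reserve arithmetic added in G1Policy::update_heap_target_size() above can be illustrated with a small standalone C++ sketch. The function and variable names here (reserve_regions_for, g1_reserve_percent) and the numeric inputs are made up for illustration; only the formula mirrors the hunk.

#include <cmath>
#include <cstdio>

// Sketch of the reserve computation: everything committed above the soft goal is
// treated as reserve, plus the usual G1ReservePercent share of the soft goal itself.
static unsigned reserve_regions_for(unsigned num_regions,           // committed regions
                                    unsigned soft_goal_num_regions, // regions covered by SoftMaxHeapSize
                                    unsigned g1_reserve_percent) {  // stands in for G1ReservePercent (default 10)
  unsigned reserve = (unsigned)std::ceil((double)num_regions * g1_reserve_percent / 100);
  if (soft_goal_num_regions < num_regions) {
    reserve = (num_regions - soft_goal_num_regions) +
              (unsigned)std::ceil((double)soft_goal_num_regions * g1_reserve_percent / 100);
  }
  return reserve;
}

int main() {
  // 128 committed regions, soft goal of 100 regions, G1ReservePercent = 10:
  // plain reserve would be ceil(12.8) = 13, soft-goal-aware reserve is 28 + 10 = 38.
  std::printf("reserve = %u regions\n", reserve_regions_for(128, 100, 10));
  // With no soft cap in effect (soft goal >= committed), only the percentage applies: 13.
  std::printf("reserve = %u regions\n", reserve_regions_for(128, 128, 10));
  return 0;
}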