--- old/src/hotspot/share/gc/g1/g1CollectedHeap.cpp 2020-02-19 11:43:05.130359789 +0100 +++ new/src/hotspot/share/gc/g1/g1CollectedHeap.cpp 2020-02-19 11:43:04.710346667 +0100 @@ -1061,7 +1061,8 @@ assert(num_free_regions() == 0, "we should not have added any free regions"); rebuild_region_sets(false /* free_list_only */); abort_refinement(); - resize_heap_if_necessary(); + + resize_heap_after_full_gc(); // Rebuild the strong code root lists for each region rebuild_strong_code_roots(); @@ -1165,41 +1166,17 @@ clear_all_soft_refs); } -void G1CollectedHeap::resize_heap_if_necessary() { +void G1CollectedHeap::resize_heap_after_full_gc() { assert_at_safepoint_on_vm_thread(); + assert(collector_state()->in_full_gc(), "Must be"); // Capacity, free and used after the GC counted as full regions to // include the waste in the following calculations. const size_t capacity_after_gc = capacity(); const size_t used_after_gc = capacity_after_gc - unused_committed_regions_in_bytes(); - // This is enforced in arguments.cpp. - assert(MinHeapFreeRatio <= MaxHeapFreeRatio, - "otherwise the code below doesn't make sense"); - - // We don't have floating point command-line arguments - const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0; - const double maximum_used_percentage = 1.0 - minimum_free_percentage; - const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0; - const double minimum_used_percentage = 1.0 - maximum_free_percentage; - - // We have to be careful here as these two calculations can overflow - // 32-bit size_t's. - double used_after_gc_d = (double) used_after_gc; - double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage; - double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage; - - // Let's make sure that they are both under the max heap size, which - // by default will make them fit into a size_t. 
- double desired_capacity_upper_bound = (double) MaxHeapSize; - minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d, - desired_capacity_upper_bound); - maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d, - desired_capacity_upper_bound); - - // We can now safely turn them into size_t's. - size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d; - size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d; + size_t minimum_desired_capacity = _heap_sizing_policy->target_heap_capacity(used_after_gc, MinHeapFreeRatio); + size_t maximum_desired_capacity = _heap_sizing_policy->target_heap_capacity(used_after_gc, MaxHeapFreeRatio); // This assert only makes sense here, before we adjust them // with respect to the min and max heap size. @@ -2424,6 +2401,10 @@ return _hrm->max_length() * HeapRegion::GrainBytes; } +size_t G1CollectedHeap::soft_max_capacity() const { + return clamp(align_up(SoftMaxHeapSize, HeapAlignment), MinHeapSize, max_capacity()); +} + jlong G1CollectedHeap::millis_since_last_gc() { // See the notes in GenCollectedHeap::millis_since_last_gc() // for more information about the implementation. @@ -2948,16 +2929,34 @@ verify_numa_regions("GC End"); } -void G1CollectedHeap::expand_heap_after_young_collection(){ - size_t expand_bytes = _heap_sizing_policy->expansion_amount(); +void G1CollectedHeap::resize_heap_after_young_collection() { + Ticks start = Ticks::now(); + if (!expand_heap_after_young_collection()) { + // If we don't attempt to expand heap, try if we need to shrink the heap + shrink_heap_after_young_collection(); + } + phase_times()->record_resize_heap_time((Ticks::now() - start).seconds() * 1000.0); +} + +bool G1CollectedHeap::expand_heap_after_young_collection(){ + size_t expand_bytes = _heap_sizing_policy->expansion_amount_after_young_collection(); if (expand_bytes > 0) { - // No need for an ergo logging here, - // expansion_amount() does this when it returns a value > 0. 
- double expand_ms; - if (!expand(expand_bytes, _workers, &expand_ms)) { + if (!expand(expand_bytes, _workers, NULL)) { // We failed to expand the heap. Cannot do anything about it. } - phase_times()->record_expand_heap_time(expand_ms); + return true; + } + return false; +} + +void G1CollectedHeap::shrink_heap_after_young_collection() { + if (collector_state()->in_young_only_phase() || policy()->next_gc_should_be_mixed()) { + // Do the shrink during gc only at the end of mixed gc phase + return; + } + size_t shrink_bytes = _heap_sizing_policy->shrink_amount_at_last_mixed_gc(policy()->desired_bytes_after_concurrent_mark()); + if (shrink_bytes > 0) { + shrink(shrink_bytes); + } } @@ -3126,7 +3125,7 @@ _allocator->init_mutator_alloc_regions(); - expand_heap_after_young_collection(); + resize_heap_after_young_collection(); double sample_end_time_sec = os::elapsedTime(); double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS; --- old/src/hotspot/share/gc/g1/g1CollectedHeap.hpp 2020-02-19 11:43:06.759410682 +0100 +++ new/src/hotspot/share/gc/g1/g1CollectedHeap.hpp 2020-02-19 11:43:06.353397998 +0100 @@ -76,6 +76,7 @@ class Space; class G1CardTableEntryClosure; class G1CollectionSet; +class G1HeapSizingPolicy; class G1Policy; class G1HotCardCache; class G1RemSet; @@ -565,7 +566,7 @@ return _g1mm; } - void resize_heap_if_necessary(); + void resize_heap_after_full_gc(); G1NUMA* numa() const { return _numa; } @@ -782,13 +783,17 @@ // Evacuate the next set of optional regions. 
void evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states); + bool expand_heap_after_young_collection(); + void shrink_heap_after_young_collection(); + public: void pre_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss); void post_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1RedirtyCardsQueueSet* rdcqs, G1ParScanThreadStateSet* pss); - void expand_heap_after_young_collection(); + void resize_heap_after_young_collection(); + // Update object copying statistics. void record_obj_copy_mem_stats(); @@ -990,6 +995,7 @@ const G1CollectorState* collector_state() const { return &_collector_state; } G1CollectorState* collector_state() { return &_collector_state; } + G1HeapSizingPolicy* heap_sizing_policy() const { return _heap_sizing_policy; } // The current policy object for the collector. G1Policy* policy() const { return _policy; } // The remembered set. @@ -1085,7 +1091,7 @@ inline void archive_set_add(HeapRegion* hr); - size_t non_young_capacity_bytes() { + size_t non_young_capacity_bytes() const { return (old_regions_count() + _archive_set.length() + humongous_regions_count()) * HeapRegion::GrainBytes; } @@ -1287,6 +1293,9 @@ // Return the size of reserved memory. Returns different value than max_capacity() when AllocateOldGenAt is used. virtual size_t max_reserved_capacity() const; + // Return the soft maximum heap capacity. 
+ size_t soft_max_capacity() const; + virtual jlong millis_since_last_gc(); --- old/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp 2020-02-19 11:43:08.266457763 +0100 +++ new/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp 2020-02-19 11:43:07.851444798 +0100 @@ -1205,8 +1205,6 @@ ClassLoaderDataGraph::purge(); } - _g1h->resize_heap_if_necessary(); - compute_new_sizes(); verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark after"); --- old/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp 2020-02-19 11:43:09.781505094 +0100 +++ new/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp 2020-02-19 11:43:09.374492379 +0100 @@ -159,7 +159,7 @@ _cur_resize_tlab_time_ms = 0.0; _cur_derived_pointer_table_update_time_ms = 0.0; _cur_clear_ct_time_ms = 0.0; - _cur_expand_heap_time_ms = 0.0; + _cur_resize_heap_time_ms = 0.0; _cur_ref_proc_time_ms = 0.0; _cur_collection_start_sec = 0.0; _root_region_scan_wait_time_ms = 0.0; @@ -466,7 +466,7 @@ _recorded_total_free_cset_time_ms + _recorded_total_rebuild_freelist_time_ms + _cur_fast_reclaim_humongous_time_ms + - _cur_expand_heap_time_ms + + _cur_resize_heap_time_ms + _cur_string_deduplication_time_ms; info_time("Post Evacuate Collection Set", sum_ms); @@ -518,7 +518,7 @@ if (UseTLAB && ResizeTLAB) { debug_time("Resize TLABs", _cur_resize_tlab_time_ms); } - debug_time("Expand Heap After Collection", _cur_expand_heap_time_ms); + debug_time("Resize Heap After Collection", _cur_resize_heap_time_ms); return sum_ms; --- old/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp 2020-02-19 11:43:11.278551863 +0100 +++ new/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp 2020-02-19 11:43:10.865538960 +0100 @@ -146,7 +146,7 @@ double _cur_derived_pointer_table_update_time_ms; double _cur_clear_ct_time_ms; - double _cur_expand_heap_time_ms; + double _cur_resize_heap_time_ms; double _cur_ref_proc_time_ms; double _cur_collection_start_sec; @@ -260,8 +260,8 @@ _cur_clear_ct_time_ms = ms; } - void record_expand_heap_time(double ms) { - 
_cur_expand_heap_time_ms = ms; + void record_resize_heap_time(double ms) { + _cur_resize_heap_time_ms = ms; } void record_initial_evac_time(double ms) { @@ -399,8 +399,8 @@ return _cur_clear_ct_time_ms; } - double cur_expand_heap_time_ms() { - return _cur_expand_heap_time_ms; + double cur_resize_heap_time_ms() { + return _cur_resize_heap_time_ms; } double root_region_scan_wait_time_ms() { --- old/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp 2020-02-19 11:43:12.811599757 +0100 +++ new/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp 2020-02-19 11:43:12.370585979 +0100 @@ -26,6 +26,7 @@ #include "gc/g1/g1CollectedHeap.hpp" #include "gc/g1/g1HeapSizingPolicy.hpp" #include "gc/g1/g1Analytics.hpp" +#include "gc/g1/g1Policy.hpp" #include "logging/log.hpp" #include "runtime/globals.hpp" #include "utilities/debug.hpp" @@ -50,7 +51,7 @@ _pauses_since_start = 0; } -size_t G1HeapSizingPolicy::expansion_amount() { +size_t G1HeapSizingPolicy::expansion_amount_after_young_collection() { double recent_gc_overhead = _analytics->recent_avg_pause_time_ratio() * 100.0; double last_gc_overhead = _analytics->last_pause_time_ratio() * 100.0; assert(GCTimeRatio > 0, @@ -160,3 +161,36 @@ return expand_bytes; } + +size_t G1HeapSizingPolicy::target_heap_capacity(size_t used_bytes, uintx free_ratio) const { + const double free_percentage = (double) free_ratio / 100.0; + const double used_percentage = 1.0 - free_percentage; + + // We have to be careful here as these two calculations can overflow + // 32-bit size_t's. + double used_bytes_d = (double) used_bytes; + double desired_capacity_d = used_bytes_d / used_percentage; + // Let's make sure that they are both under the max heap size, which + // by default will make it fit into a size_t. + double desired_capacity_upper_bound = (double) MaxHeapSize; + desired_capacity_d = MIN2(desired_capacity_d, desired_capacity_upper_bound); + // We can now safely turn it into size_t's. 
+ return (size_t) desired_capacity_d; +} + +size_t G1HeapSizingPolicy::shrink_amount_at_last_mixed_gc(size_t desired_bytes_after_concurrent_mark) { + size_t shrink_bytes = 0; + const size_t capacity_after_gc = _g1h->capacity(); + const size_t used_after_gc = capacity_after_gc - _g1h->unused_committed_regions_in_bytes(); + size_t maximum_desired_capacity = target_heap_capacity(used_after_gc, MaxHeapFreeRatio); + // soft_max_capacity can be smaller + maximum_desired_capacity = MIN2(maximum_desired_capacity, _g1h->soft_max_capacity()); + // Make sure not less than _minimum_desired_bytes_after_last_cm + maximum_desired_capacity = MAX2(maximum_desired_capacity, desired_bytes_after_concurrent_mark); + + if (capacity_after_gc > maximum_desired_capacity) { + shrink_bytes = capacity_after_gc - maximum_desired_capacity; + } + + return shrink_bytes; +} --- old/src/hotspot/share/gc/g1/g1HeapSizingPolicy.hpp 2020-02-19 11:43:14.315646744 +0100 +++ new/src/hotspot/share/gc/g1/g1HeapSizingPolicy.hpp 2020-02-19 11:43:13.902633841 +0100 @@ -45,15 +45,18 @@ double _ratio_over_threshold_sum; uint _pauses_since_start; - -protected: G1HeapSizingPolicy(const G1CollectedHeap* g1h, const G1Analytics* analytics); + public: // If an expansion would be appropriate, because recent GC overhead had // exceeded the desired limit, return an amount to expand by. - virtual size_t expansion_amount(); + size_t expansion_amount_after_young_collection(); + + size_t shrink_amount_at_last_mixed_gc(size_t desired_bytes_after_concurrent_mark); + // Calculate the target capacity based on used bytes and free ratio + size_t target_heap_capacity(size_t used_bytes, uintx free_ratio) const; // Clear ratio tracking data used by expansion_amount(). 
void clear_ratio_check_data(); --- old/src/hotspot/share/gc/g1/g1IHOPControl.cpp 2020-02-19 11:43:15.822693825 +0100 +++ new/src/hotspot/share/gc/g1/g1IHOPControl.cpp 2020-02-19 11:43:15.402680704 +0100 @@ -108,7 +108,7 @@ double safe_total_heap_percentage = MIN2((double)(_heap_reserve_percent + _heap_waste_percent), 100.0); return (size_t)MIN2( - G1CollectedHeap::heap()->max_capacity() * (100.0 - safe_total_heap_percentage) / 100.0, + G1CollectedHeap::heap()->soft_max_capacity() * (100.0 - safe_total_heap_percentage) / 100.0, _target_occupancy * (100.0 - _heap_waste_percent) / 100.0 ); } @@ -187,3 +187,8 @@ predict(&_marking_times_s), have_enough_data_for_prediction()); } + +size_t G1AdaptiveIHOPControl::predict_unrestrained_buffer_size() const { + // Besides the young size, the promotion bytes of Prepare Mixed and 1st Mixed GC will be counted + return _last_unrestrained_young_size + _last_allocated_bytes * 2; +} --- old/src/hotspot/share/gc/g1/g1IHOPControl.hpp 2020-02-19 11:43:17.313740407 +0100 +++ new/src/hotspot/share/gc/g1/g1IHOPControl.hpp 2020-02-19 11:43:16.903727598 +0100 @@ -77,6 +77,8 @@ virtual void print(); virtual void send_trace_event(G1NewTracer* tracer); + + virtual size_t predict_unrestrained_buffer_size() const { return 0; } }; // The returned concurrent mark starting occupancy threshold is a fixed value @@ -147,6 +149,7 @@ virtual void print(); virtual void send_trace_event(G1NewTracer* tracer); + virtual size_t predict_unrestrained_buffer_size() const; }; #endif // SHARE_GC_G1_G1IHOPCONTROL_HPP --- old/src/hotspot/share/gc/g1/g1Policy.cpp 2020-02-19 11:43:18.797786770 +0100 +++ new/src/hotspot/share/gc/g1/g1Policy.cpp 2020-02-19 11:43:18.391774085 +0100 @@ -32,6 +32,7 @@ #include "gc/g1/g1ConcurrentMarkThread.inline.hpp" #include "gc/g1/g1ConcurrentRefine.hpp" #include "gc/g1/g1CollectionSetChooser.hpp" +#include "gc/g1/g1HeapSizingPolicy.hpp" #include "gc/g1/g1HeterogeneousHeapPolicy.hpp" #include "gc/g1/g1HotCardCache.hpp" #include 
"gc/g1/g1IHOPControl.hpp" @@ -76,6 +77,7 @@ _total_concurrent_refined_cards(0), _total_concurrent_refinement_time(), _bytes_allocated_in_old_since_last_gc(0), + _minimum_desired_bytes_after_last_cm(0), _initial_mark_to_mixed(), _collection_set(NULL), _g1h(NULL), @@ -1095,6 +1097,21 @@ } } +void G1Policy::determine_desired_bytes_after_concurrent_mark() { + size_t cur_used_bytes = _g1h->non_young_capacity_bytes(); + + size_t overall_target_capacity = _g1h->heap_sizing_policy()->target_heap_capacity(cur_used_bytes, MinHeapFreeRatio); + + size_t desired_bytes_after_concurrent_mark = _g1h->policy()->desired_bytes_after_concurrent_mark(cur_used_bytes); + + _minimum_desired_bytes_after_last_cm = MIN2(desired_bytes_after_concurrent_mark, overall_target_capacity); + + log_debug(gc, ergo, heap)("Expansion amount after remark used: " SIZE_FORMAT " " + "minimum_desired_capacity " SIZE_FORMAT " desired_bytes_after_concurrent_mark: " SIZE_FORMAT " " + "minimum_desired_bytes_after_concurrent_mark " SIZE_FORMAT, + cur_used_bytes, overall_target_capacity, desired_bytes_after_concurrent_mark, _minimum_desired_bytes_after_last_cm); +} + void G1Policy::record_concurrent_mark_cleanup_end() { G1CollectionSetCandidates* candidates = G1CollectionSetChooser::build(_g1h->workers(), _g1h->num_regions()); _collection_set->set_candidates(candidates); @@ -1107,6 +1124,8 @@ collector_state()->set_in_young_gc_before_mixed(mixed_gc_pending); collector_state()->set_mark_or_rebuild_in_progress(false); + determine_desired_bytes_after_concurrent_mark(); + double end_sec = os::elapsedTime(); double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0; _analytics->report_concurrent_mark_cleanup_times_ms(elapsed_time_ms); @@ -1199,8 +1218,10 @@ const char* false_action_str) const { G1CollectionSetCandidates* candidates = _collection_set->candidates(); - if (candidates->is_empty()) { - log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str); + if (candidates == NULL || 
candidates->is_empty()) { + if (false_action_str != NULL) { + log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str); + } return false; } @@ -1209,12 +1230,16 @@ double reclaimable_percent = reclaimable_bytes_percent(reclaimable_bytes); double threshold = (double) G1HeapWastePercent; if (reclaimable_percent <= threshold) { - log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT, - false_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent); + if (false_action_str != NULL) { + log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT, + false_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent); + } return false; } - log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT, - true_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent); + if (true_action_str != NULL) { + log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT, + true_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent); + } return true; } @@ -1411,3 +1436,10 @@ // the survivor regions from this evacuation pause as 'young' // at the start of the next. } + +size_t G1Policy::desired_bytes_after_concurrent_mark(size_t used_bytes) { + size_t minimum_desired_buffer_size = _ihop_control->predict_unrestrained_buffer_size(); + return minimum_desired_buffer_size != 0 ? 
+ minimum_desired_buffer_size : + _young_list_max_length * HeapRegion::GrainBytes + _reserve_regions * HeapRegion::GrainBytes + used_bytes; +} --- old/src/hotspot/share/gc/g1/g1Policy.hpp 2020-02-19 11:43:20.325834507 +0100 +++ new/src/hotspot/share/gc/g1/g1Policy.hpp 2020-02-19 11:43:19.906821417 +0100 @@ -110,6 +110,10 @@ // young GC phase. size_t _bytes_allocated_in_old_since_last_gc; + size_t _minimum_desired_bytes_after_last_cm; + + void determine_desired_bytes_after_concurrent_mark(); + G1InitialMarkToMixedTimeTracker _initial_mark_to_mixed; bool should_update_surv_rate_group_predictors() { @@ -338,8 +342,8 @@ void print_phases(); - bool next_gc_should_be_mixed(const char* true_action_str, - const char* false_action_str) const; + bool next_gc_should_be_mixed(const char* true_action_str = NULL, + const char* false_action_str = NULL) const; // Calculate and return the number of initial and optional old gen regions from // the given collection set candidates and the remaining time. @@ -377,6 +381,8 @@ // the initial-mark work and start a marking cycle. 
void decide_on_conc_mark_initiation(); + size_t desired_bytes_after_concurrent_mark() const { return _minimum_desired_bytes_after_last_cm; } + size_t young_list_target_length() const { return _young_list_target_length; } bool should_allocate_mutator_region() const; @@ -444,6 +450,7 @@ virtual bool force_upgrade_to_full() { return false; } + size_t desired_bytes_after_concurrent_mark(size_t used_bytes); }; #endif // SHARE_GC_G1_G1POLICY_HPP --- old/test/hotspot/jtreg/gc/g1/TestGCLogMessages.java 2020-02-19 11:43:21.947885181 +0100 +++ new/test/hotspot/jtreg/gc/g1/TestGCLogMessages.java 2020-02-19 11:43:21.476870466 +0100 @@ -140,7 +140,7 @@ new LogMessageWithLevel("String Deduplication", Level.DEBUG), new LogMessageWithLevel("Queue Fixup", Level.DEBUG), new LogMessageWithLevel("Table Fixup", Level.DEBUG), - new LogMessageWithLevel("Expand Heap After Collection", Level.DEBUG), + new LogMessageWithLevel("Resize Heap After Collection", Level.DEBUG), new LogMessageWithLevel("Region Register", Level.DEBUG), new LogMessageWithLevel("Prepare Heap Roots", Level.DEBUG), // Free CSet