--- old/src/hotspot/share/gc/g1/g1CollectedHeap.cpp 2020-02-17 17:22:56.188243279 +0800
+++ new/src/hotspot/share/gc/g1/g1CollectedHeap.cpp 2020-02-17 17:22:56.191243387 +0800
@@ -1179,29 +1179,8 @@
   assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
          "otherwise the code below doesn't make sense");
 
-  // We don't have floating point command-line arguments
-  const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
-  const double maximum_used_percentage = 1.0 - minimum_free_percentage;
-  const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
-  const double minimum_used_percentage = 1.0 - maximum_free_percentage;
-
-  // We have to be careful here as these two calculations can overflow
-  // 32-bit size_t's.
-  double used_after_gc_d = (double) used_after_gc;
-  double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;
-  double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;
-
-  // Let's make sure that they are both under the max heap size, which
-  // by default will make them fit into a size_t.
-  double desired_capacity_upper_bound = (double) MaxHeapSize;
-  minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
-                                    desired_capacity_upper_bound);
-  maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
-                                    desired_capacity_upper_bound);
-
-  // We can now safely turn them into size_t's.
-  size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
-  size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;
+  size_t minimum_desired_capacity = _heap_sizing_policy->target_heap_capacity(used_after_gc, MinHeapFreeRatio);
+  size_t maximum_desired_capacity = _heap_sizing_policy->target_heap_capacity(used_after_gc, MaxHeapFreeRatio);
 
   // This assert only makes sense here, before we adjust them
   // with respect to the min and max heap size.
@@ -2426,6 +2405,10 @@
   return _hrm->max_length() * HeapRegion::GrainBytes;
 }
 
+size_t G1CollectedHeap::soft_max_capacity() const {
+  return clamp(align_up(SoftMaxHeapSize, HeapAlignment), MinHeapSize, max_capacity());
+}
+
 jlong G1CollectedHeap::millis_since_last_gc() {
   // See the notes in GenCollectedHeap::millis_since_last_gc()
   // for more information about the implementation.
@@ -2952,16 +2935,41 @@
   verify_numa_regions("GC End");
 }
 
-void G1CollectedHeap::expand_heap_after_young_collection(){
-  size_t expand_bytes = _heap_sizing_policy->expansion_amount();
+void G1CollectedHeap::resize_heap_after_young_collection() {
+  Ticks start = Ticks::now();
+  if (!expand_heap_after_young_collection()) {
+    // If we did not attempt to expand the heap, check whether we need to shrink it.
+    shrink_heap_after_young_collection();
+  }
+  phase_times()->record_resize_heap_time((Ticks::now() - start).seconds() * 1000.0);
+}
+
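+// Attempt an expansion as determined by the sizing policy. Returns true if an
+// expansion was attempted at all, even if the underlying commit failed, so
+// that the caller skips the shrink check for this pause.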
+bool G1CollectedHeap::expand_heap_after_young_collection() {
+  size_t expand_bytes = _heap_sizing_policy->expansion_amount_after_young_collection();
   if (expand_bytes > 0) {
-    // No need for an ergo logging here,
-    // expansion_amount() does this when it returns a value > 0.
-    double expand_ms;
-    if (!expand(expand_bytes, _workers, &expand_ms)) {
+    if (!expand(expand_bytes, _workers, NULL)) {
       // We failed to expand the heap. Cannot do anything about it.
     }
-    phase_times()->record_expand_heap_time(expand_ms);
+    return true;
   }
+  return false;
+}
+
+void G1CollectedHeap::shrink_heap_after_young_collection() {
+  if (!collector_state()->finish_of_mixed_gc()) {
+    // Only shrink after the mixed GC phase has finished.
+    return;
+  }
+  size_t shrink_bytes = _heap_sizing_policy->shrink_amount_after_mixed_collections();
+  if (shrink_bytes > 0) {
+    shrink(shrink_bytes);
+  }
+}
+
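+// Eagerly expand the heap up to the sizing policy's post-mark minimum target.
+// Called from the Cleanup pause (see the g1ConcurrentMark.cpp change below).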
+void G1CollectedHeap::expand_heap_after_concurrent_mark() {
+  size_t expand_bytes = _heap_sizing_policy->expansion_amount_after_concurrent_mark();
+  if (expand_bytes > 0) {
+    expand(expand_bytes, _workers, NULL);
+  }
 }
 
@@ -3117,7 +3125,7 @@
 
   _allocator->init_mutator_alloc_regions();
 
-  expand_heap_after_young_collection();
+  resize_heap_after_young_collection();
 
   double sample_end_time_sec = os::elapsedTime();
   double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
--- old/src/hotspot/share/gc/g1/g1CollectedHeap.hpp 2020-02-17 17:22:56.339248719 +0800
+++ new/src/hotspot/share/gc/g1/g1CollectedHeap.hpp 2020-02-17 17:22:56.342248827 +0800
@@ -781,7 +781,11 @@
                                     G1RedirtyCardsQueueSet* rdcqs,
                                     G1ParScanThreadStateSet* pss);
 
-  void expand_heap_after_young_collection();
+  void resize_heap_after_young_collection();
+  bool expand_heap_after_young_collection();
+  void shrink_heap_after_young_collection();
+  void expand_heap_after_concurrent_mark();
+
   // Update object copying statistics.
   void record_obj_copy_mem_stats();
 
@@ -1078,7 +1082,7 @@
 
   inline void archive_set_add(HeapRegion* hr);
 
-  size_t non_young_capacity_bytes() {
+  size_t non_young_capacity_bytes() const {
     return (old_regions_count() + _archive_set.length() + humongous_regions_count()) * HeapRegion::GrainBytes;
   }
 
@@ -1280,6 +1284,9 @@
   // Return the size of reserved memory. Returns different value than max_capacity() when AllocateOldGenAt is used.
   virtual size_t max_reserved_capacity() const;
 
+  // Return the soft maximum heap capacity.
+  size_t soft_max_capacity() const;
+
   virtual jlong millis_since_last_gc();
--- old/src/hotspot/share/gc/g1/g1CollectorState.hpp 2020-02-17 17:22:56.425251817 +0800
+++ new/src/hotspot/share/gc/g1/g1CollectorState.hpp 2020-02-17 17:22:56.428251925 +0800
@@ -71,6 +71,9 @@
   // Set during a full gc pause.
   bool _in_full_gc;
 
+  // Indicates that the mixed GC phase has just finished.
+  bool _finish_of_mixed_gc;
+
 public:
   G1CollectorState() :
     _in_young_only_phase(true),
@@ -81,7 +84,8 @@
 
     _mark_or_rebuild_in_progress(false),
     _clearing_next_bitmap(false),
-    _in_full_gc(false) { }
+    _in_full_gc(false),
+    _finish_of_mixed_gc(false) { }
 
   // Phase setters
   void set_in_young_only_phase(bool v) { _in_young_only_phase = v; }
@@ -90,6 +94,7 @@
   void set_in_young_gc_before_mixed(bool v) { _in_young_gc_before_mixed = v; }
   void set_in_initial_mark_gc(bool v) { _in_initial_mark_gc = v; }
   void set_in_full_gc(bool v) { _in_full_gc = v; }
+  void set_finish_of_mixed_gc(bool v) { _finish_of_mixed_gc = v; }
 
   void set_initiate_conc_mark_if_possible(bool v) { _initiate_conc_mark_if_possible = v; }
 
@@ -109,6 +114,7 @@
   bool mark_or_rebuild_in_progress() const { return _mark_or_rebuild_in_progress; }
   bool clearing_next_bitmap() const { return _clearing_next_bitmap; }
 
+  bool finish_of_mixed_gc() const { return _finish_of_mixed_gc; }
 
   G1YCType yc_type() const {
     if (in_initial_mark_gc()) {
--- old/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp 2020-02-17 17:22:56.492254231 +0800
+++ new/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp 2020-02-17 17:22:56.494254303 +0800
@@ -1209,8 +1209,6 @@
     ClassLoaderDataGraph::purge();
   }
 
-  _g1h->resize_heap_if_necessary();
-
   compute_new_sizes();
 
   verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark after");
@@ -1382,6 +1380,11 @@
   // races with it goes around and waits for Cleanup to finish.
   _g1h->increment_total_collections();
 
+  {
+    GCTraceTime(Debug, gc, phases) debug("Expand heap after concurrent mark", _gc_timer_cm);
+    _g1h->expand_heap_after_concurrent_mark();
+  }
+
   // Local statistics
   double recent_cleanup_time = (os::elapsedTime() - start);
   _total_cleanup_time += recent_cleanup_time;
--- old/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp 2020-02-17 17:22:56.596257978 +0800
+++ new/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp 2020-02-17 17:22:56.598258050 +0800
@@ -159,7 +159,7 @@
   _cur_resize_tlab_time_ms = 0.0;
   _cur_derived_pointer_table_update_time_ms = 0.0;
   _cur_clear_ct_time_ms = 0.0;
-  _cur_expand_heap_time_ms = 0.0;
+  _cur_resize_heap_time_ms = 0.0;
   _cur_ref_proc_time_ms = 0.0;
   _cur_collection_start_sec = 0.0;
   _root_region_scan_wait_time_ms = 0.0;
@@ -466,7 +466,7 @@
                         _recorded_total_free_cset_time_ms +
                         _recorded_total_rebuild_freelist_time_ms +
                         _cur_fast_reclaim_humongous_time_ms +
-                        _cur_expand_heap_time_ms +
+                        _cur_resize_heap_time_ms +
                         _cur_string_deduplication_time_ms;
 
   info_time("Post Evacuate Collection Set", sum_ms);
@@ -518,7 +518,7 @@
   if (UseTLAB && ResizeTLAB) {
     debug_time("Resize TLABs", _cur_resize_tlab_time_ms);
   }
-  debug_time("Expand Heap After Collection", _cur_expand_heap_time_ms);
+  debug_time("Resize Heap After Collection", _cur_resize_heap_time_ms);
 
   return sum_ms;
--- old/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp 2020-02-17 17:22:56.671260680 +0800
+++ new/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp 2020-02-17 17:22:56.673260752 +0800
@@ -146,7 +146,7 @@
   double _cur_derived_pointer_table_update_time_ms;
 
   double _cur_clear_ct_time_ms;
-  double _cur_expand_heap_time_ms;
+  double _cur_resize_heap_time_ms;
   double _cur_ref_proc_time_ms;
 
   double _cur_collection_start_sec;
@@ -260,8 +260,8 @@
     _cur_clear_ct_time_ms = ms;
   }
 
-  void record_expand_heap_time(double ms) {
-    _cur_expand_heap_time_ms = ms;
+  void record_resize_heap_time(double ms) {
+    _cur_resize_heap_time_ms = ms;
   }
 
   void record_initial_evac_time(double ms) {
@@ -399,8 +399,8 @@
     return _cur_clear_ct_time_ms;
   }
 
-  double cur_expand_heap_time_ms() {
-    return _cur_expand_heap_time_ms;
+  double cur_resize_heap_time_ms() {
+    return _cur_resize_heap_time_ms;
   }
 
   double root_region_scan_wait_time_ms() {
--- old/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp 2020-02-17 17:22:56.740263166 +0800
+++ new/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp 2020-02-17 17:22:56.743263274 +0800
@@ -26,6 +26,7 @@
 #include "gc/g1/g1CollectedHeap.hpp"
 #include "gc/g1/g1HeapSizingPolicy.hpp"
 #include "gc/g1/g1Analytics.hpp"
+#include "gc/g1/g1Policy.hpp"
 #include "logging/log.hpp"
 #include "runtime/globals.hpp"
 #include "utilities/debug.hpp"
@@ -38,7 +39,8 @@
 G1HeapSizingPolicy::G1HeapSizingPolicy(const G1CollectedHeap* g1h, const G1Analytics* analytics) :
   _g1h(g1h),
   _analytics(analytics),
-  _num_prev_pauses_for_heuristics(analytics->number_of_recorded_pause_times()) {
+  _num_prev_pauses_for_heuristics(analytics->number_of_recorded_pause_times()),
+  _minimum_desired_bytes_after_last_cm(MinHeapSize) {
 
   assert(MinOverThresholdForGrowth < _num_prev_pauses_for_heuristics, "Threshold must be less than %u", _num_prev_pauses_for_heuristics);
   clear_ratio_check_data();
@@ -50,7 +52,7 @@
   _pauses_since_start = 0;
 }
 
-size_t G1HeapSizingPolicy::expansion_amount() {
+size_t G1HeapSizingPolicy::expansion_amount_after_young_collection() {
   double recent_gc_overhead = _analytics->recent_avg_pause_time_ratio() * 100.0;
   double last_gc_overhead = _analytics->last_pause_time_ratio() * 100.0;
   assert(GCTimeRatio > 0,
@@ -160,3 +162,52 @@
 
   return expand_bytes;
 }
+
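+// Capacity at which free_ratio percent of the heap is free at the given
+// occupancy, i.e. used_bytes / (1 - free_ratio / 100), capped at MaxHeapSize
+// so the result safely fits into a size_t.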
+size_t G1HeapSizingPolicy::target_heap_capacity(size_t used_bytes, uintx free_ratio) {
+  const double free_percentage = (double) free_ratio / 100.0;
+  const double used_percentage = 1.0 - free_percentage;
+
+  // We have to be careful here as this calculation can overflow
+  // 32-bit size_t's.
+  double used_bytes_d = (double) used_bytes;
+  double desired_capacity_d = used_bytes_d / used_percentage;
+  // Let's make sure that it is under the max heap size, which
+  // by default will make it fit into a size_t.
+  double desired_capacity_upper_bound = (double) MaxHeapSize;
+  desired_capacity_d = MIN2(desired_capacity_d, desired_capacity_upper_bound);
+  // We can now safely turn it into a size_t.
+  return (size_t) desired_capacity_d;
+}
+
+size_t G1HeapSizingPolicy::expansion_amount_after_concurrent_mark() {
+  size_t cur_used_bytes = _g1h->non_young_capacity_bytes();
+
+  size_t minimum_desired_capacity = target_heap_capacity(cur_used_bytes, MinHeapFreeRatio);
+
+  _minimum_desired_bytes_after_last_cm = _g1h->policy()->minimum_desired_bytes_after_concurrent_mark(cur_used_bytes);
+  // Use the smaller of minimum_desired_capacity and the predicted
+  // minimum_desired_bytes_after_concurrent_mark. We still consider
+  // minimum_desired_capacity because minimum_desired_bytes_after_concurrent_mark
+  // might include a lot of newly allocated humongous objects.
+  _minimum_desired_bytes_after_last_cm = MIN2(_minimum_desired_bytes_after_last_cm, minimum_desired_capacity);
+
+  return _minimum_desired_bytes_after_last_cm > _g1h->capacity() ?
+         _minimum_desired_bytes_after_last_cm - _g1h->capacity() : 0;
+}
+
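+// How much to shrink once the mixed GCs of a cycle are done: aim at
+// MaxHeapFreeRatio without exceeding SoftMaxHeapSize, but never go below the
+// minimum capacity estimated after the last concurrent mark.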
+size_t G1HeapSizingPolicy::shrink_amount_after_mixed_collections() {
+  size_t shrink_bytes = 0;
+  const size_t capacity_after_gc = _g1h->capacity();
+  const size_t used_after_gc = capacity_after_gc - _g1h->unused_committed_regions_in_bytes();
+  size_t maximum_desired_capacity = target_heap_capacity(used_after_gc, MaxHeapFreeRatio);
+  // soft_max_capacity can be smaller.
+  maximum_desired_capacity = MIN2(maximum_desired_capacity, _g1h->soft_max_capacity());
+  // Make sure we do not shrink below _minimum_desired_bytes_after_last_cm.
+  maximum_desired_capacity = MAX2(maximum_desired_capacity, _minimum_desired_bytes_after_last_cm);
+
+  if (capacity_after_gc > maximum_desired_capacity) {
+    shrink_bytes = capacity_after_gc - maximum_desired_capacity;
+  }
+
+  return shrink_bytes;
+}
--- old/src/hotspot/share/gc/g1/g1HeapSizingPolicy.hpp 2020-02-17 17:22:56.808265615 +0800
+++ new/src/hotspot/share/gc/g1/g1HeapSizingPolicy.hpp 2020-02-17 17:22:56.810265687 +0800
@@ -45,6 +45,9 @@
   double _ratio_over_threshold_sum;
   uint _pauses_since_start;
 
+  // Minimum desired heap capacity after the last concurrent mark.
+  size_t _minimum_desired_bytes_after_last_cm;
+
 protected:
   G1HeapSizingPolicy(const G1CollectedHeap* g1h, const G1Analytics* analytics);
 
@@ -52,8 +55,12 @@
 
   // If an expansion would be appropriate, because recent GC overhead had
   // exceeded the desired limit, return an amount to expand by.
-  virtual size_t expansion_amount();
+  virtual size_t expansion_amount_after_young_collection();
+  virtual size_t expansion_amount_after_concurrent_mark();
+  virtual size_t shrink_amount_after_mixed_collections();
+
+  // Calculate the target capacity based on used bytes and the free ratio.
+  virtual size_t target_heap_capacity(size_t used_bytes, uintx free_ratio);
 
   // Clear ratio tracking data used by expansion_amount().
   void clear_ratio_check_data();
--- old/src/hotspot/share/gc/g1/g1IHOPControl.cpp 2020-02-17 17:22:56.877268101 +0800
+++ new/src/hotspot/share/gc/g1/g1IHOPControl.cpp 2020-02-17 17:22:56.879268173 +0800
@@ -108,7 +108,7 @@
   double safe_total_heap_percentage = MIN2((double)(_heap_reserve_percent + _heap_waste_percent), 100.0);
 
   return (size_t)MIN2(
-    G1CollectedHeap::heap()->max_capacity() * (100.0 - safe_total_heap_percentage) / 100.0,
+    G1CollectedHeap::heap()->soft_max_capacity() * (100.0 - safe_total_heap_percentage) / 100.0,
     _target_occupancy * (100.0 - _heap_waste_percent) / 100.0
     );
 }
@@ -187,3 +187,8 @@
     predict(&_marking_times_s),
     have_enough_data_for_prediction());
 }
+
+size_t G1AdaptiveIHOPControl::predict_unstrained_buffer_size() const {
+  // Besides the young gen size, also count the bytes promoted by the Prepare Mixed and the first Mixed GC.
+  return _last_unrestrained_young_size + _last_allocated_bytes * 2;
+}
--- old/src/hotspot/share/gc/g1/g1IHOPControl.hpp 2020-02-17 17:22:56.942270443 +0800
+++ new/src/hotspot/share/gc/g1/g1IHOPControl.hpp 2020-02-17 17:22:56.945270551 +0800
@@ -77,6 +77,8 @@
 
   virtual void print();
   virtual void send_trace_event(G1NewTracer* tracer);
+
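+  // Predicted buffer (young gen plus expected promotion) needed on top of the
+  // live data. Returns 0 when no prediction is available; callers then fall
+  // back to their own heuristic.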
+  virtual size_t predict_unstrained_buffer_size() const { return 0; }
 };
 
 // The returned concurrent mark starting occupancy threshold is a fixed value
@@ -147,6 +149,7 @@
 
   virtual void print();
   virtual void send_trace_event(G1NewTracer* tracer);
+  virtual size_t predict_unstrained_buffer_size() const;
 };
 
 #endif // SHARE_GC_G1_G1IHOPCONTROL_HPP
--- old/src/hotspot/share/gc/g1/g1Policy.cpp 2020-02-17 17:22:57.008272821 +0800
+++ new/src/hotspot/share/gc/g1/g1Policy.cpp 2020-02-17 17:22:57.010272893 +0800
@@ -679,6 +679,9 @@
     _analytics->compute_pause_time_ratio(interval_ms, pause_time_ms);
   }
 
+  if (collector_state()->finish_of_mixed_gc()) {
+    collector_state()->set_finish_of_mixed_gc(false);
+  }
   if (collector_state()->in_young_gc_before_mixed()) {
     assert(!this_pause_included_initial_mark, "The young GC before mixed is not allowed to be an initial mark GC");
     // This has been the young GC before we start doing mixed GCs. We already
@@ -692,6 +695,7 @@
     if (!next_gc_should_be_mixed("continue mixed GCs",
                                  "do not continue mixed GCs")) {
       collector_state()->set_in_young_only_phase(true);
+      collector_state()->set_finish_of_mixed_gc(true);
 
       clear_collection_set_candidates();
       maybe_start_marking();
@@ -1411,3 +1415,10 @@
   // the survivor regions from this evacuation pause as 'young'
   // at the start of the next.
 }
+
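+// Minimum desired committed heap size after concurrent mark: the IHOP-predicted
+// buffer when available, otherwise a fallback based on the maximum young
+// length, the reserve regions and the given used bytes.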
+size_t G1Policy::minimum_desired_bytes_after_concurrent_mark(size_t used_bytes) {
+  size_t minimum_desired_buffer_size = _ihop_control->predict_unstrained_buffer_size();
+  return minimum_desired_buffer_size != 0 ?
+         minimum_desired_buffer_size : _young_list_max_length * HeapRegion::GrainBytes +
+         _reserve_regions * HeapRegion::GrainBytes + used_bytes;
+}
--- old/src/hotspot/share/gc/g1/g1Policy.hpp 2020-02-17 17:22:57.108276423 +0800
+++ new/src/hotspot/share/gc/g1/g1Policy.hpp 2020-02-17 17:22:57.111276532 +0800
@@ -444,6 +444,7 @@
 
   virtual bool force_upgrade_to_full() { return false; }
 
+  size_t minimum_desired_bytes_after_concurrent_mark(size_t used_bytes);
 };
 
 #endif // SHARE_GC_G1_G1POLICY_HPP
--- old/test/hotspot/jtreg/gc/g1/TestGCLogMessages.java 2020-02-17 17:22:57.188279305 +0800
+++ new/test/hotspot/jtreg/gc/g1/TestGCLogMessages.java 2020-02-17 17:22:57.191279414 +0800
@@ -140,7 +140,7 @@
         new LogMessageWithLevel("String Deduplication", Level.DEBUG),
         new LogMessageWithLevel("Queue Fixup", Level.DEBUG),
         new LogMessageWithLevel("Table Fixup", Level.DEBUG),
-        new LogMessageWithLevel("Expand Heap After Collection", Level.DEBUG),
+        new LogMessageWithLevel("Resize Heap After Collection", Level.DEBUG),
         new LogMessageWithLevel("Region Register", Level.DEBUG),
         new LogMessageWithLevel("Prepare Heap Roots", Level.DEBUG),
         // Free CSet