--- old/src/hotspot/share/gc/g1/g1CollectedHeap.cpp 2020-08-13 04:14:07.011213819 +0000
+++ new/src/hotspot/share/gc/g1/g1CollectedHeap.cpp 2020-08-13 04:14:06.847210674 +0000
@@ -872,7 +872,7 @@
   if (result != NULL) {
     size_t size_in_regions = humongous_obj_size_in_regions(word_size);
     policy()->old_gen_alloc_tracker()->
-      add_allocated_bytes_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
+      add_allocated_humongous_bytes_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
     return result;
   }
 
@@ -892,6 +892,9 @@
       assert(succeeded, "only way to get back a non-NULL result");
       log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
                            Thread::current()->name(), p2i(result));
+      size_t size_in_regions = humongous_obj_size_in_regions(word_size);
+      policy()->old_gen_alloc_tracker()->
+        record_collection_pause_humongous_allocation(size_in_regions * HeapRegion::GrainBytes);
       return result;
     }
--- old/src/hotspot/share/gc/g1/g1IHOPControl.cpp 2020-08-13 04:14:07.843229771 +0000
+++ new/src/hotspot/share/gc/g1/g1IHOPControl.cpp 2020-08-13 04:14:07.687226779 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,11 +29,12 @@
 #include "gc/g1/g1Trace.hpp"
 #include "logging/log.hpp"
 
-G1IHOPControl::G1IHOPControl(double initial_ihop_percent) :
+G1IHOPControl::G1IHOPControl(double initial_ihop_percent,
+                             G1OldGenAllocationTracker const* old_gen_alloc_tracker) :
   _initial_ihop_percent(initial_ihop_percent),
   _target_occupancy(0),
   _last_allocation_time_s(0.0),
-  _last_allocated_bytes(0)
+  _old_gen_alloc_tracker(old_gen_alloc_tracker)
 {
   assert(_initial_ihop_percent >= 0.0 && _initial_ihop_percent <= 100.0, "Initial IHOP value must be between 0 and 100 but is %.3f", initial_ihop_percent);
 }
@@ -44,11 +45,10 @@
   _target_occupancy = new_target_occupancy;
 }
 
-void G1IHOPControl::update_allocation_info(double allocation_time_s, size_t allocated_bytes, size_t additional_buffer_size) {
+void G1IHOPControl::update_allocation_info(double allocation_time_s, size_t additional_buffer_size) {
   assert(allocation_time_s >= 0.0, "Allocation time must be positive but is %.3f", allocation_time_s);
 
   _last_allocation_time_s = allocation_time_s;
-  _last_allocated_bytes = allocated_bytes;
 }
 
 void G1IHOPControl::print() {
@@ -60,9 +60,9 @@
                       percent_of(cur_conc_mark_start_threshold, _target_occupancy),
                       _target_occupancy,
                       G1CollectedHeap::heap()->used(),
-                      _last_allocated_bytes,
+                      _old_gen_alloc_tracker->last_period_old_bytes(),
                       _last_allocation_time_s * 1000.0,
-                      _last_allocation_time_s > 0.0 ? _last_allocated_bytes / _last_allocation_time_s : 0.0,
+                      _last_allocation_time_s > 0.0 ? _old_gen_alloc_tracker->last_period_old_bytes() / _last_allocation_time_s : 0.0,
                       last_marking_length_s() * 1000.0);
 }
 
@@ -71,21 +71,23 @@
   tracer->report_basic_ihop_statistics(get_conc_mark_start_threshold(),
                                        _target_occupancy,
                                        G1CollectedHeap::heap()->used(),
-                                       _last_allocated_bytes,
+                                       _old_gen_alloc_tracker->last_period_old_bytes(),
                                        _last_allocation_time_s,
                                        last_marking_length_s());
 }
 
-G1StaticIHOPControl::G1StaticIHOPControl(double ihop_percent) :
-  G1IHOPControl(ihop_percent),
+G1StaticIHOPControl::G1StaticIHOPControl(double ihop_percent,
+                                         G1OldGenAllocationTracker const* old_gen_alloc_tracker) :
+  G1IHOPControl(ihop_percent, old_gen_alloc_tracker),
   _last_marking_length_s(0.0) {
 }
 
 G1AdaptiveIHOPControl::G1AdaptiveIHOPControl(double ihop_percent,
+                                             G1OldGenAllocationTracker const* old_gen_alloc_tracker,
                                              G1Predictions const* predictor,
                                              size_t heap_reserve_percent,
                                              size_t heap_waste_percent) :
-  G1IHOPControl(ihop_percent),
+  G1IHOPControl(ihop_percent, old_gen_alloc_tracker),
   _heap_reserve_percent(heap_reserve_percent),
   _heap_waste_percent(heap_waste_percent),
   _predictor(predictor),
@@ -145,13 +147,16 @@
   }
 }
 
+double G1AdaptiveIHOPControl::last_mutator_period_old_allocation_rate() const {
+  assert(_last_allocation_time_s > 0, "This should not be called when the last GC is full");
+
+  return _old_gen_alloc_tracker->last_period_net_survived_old_bytes() / _last_allocation_time_s;
+}
+
 void G1AdaptiveIHOPControl::update_allocation_info(double allocation_time_s,
-                                                   size_t allocated_bytes,
                                                    size_t additional_buffer_size) {
-  G1IHOPControl::update_allocation_info(allocation_time_s, allocated_bytes, additional_buffer_size);
-
-  double allocation_rate = (double) allocated_bytes / allocation_time_s;
-  _allocation_rate_s.add(allocation_rate);
+  G1IHOPControl::update_allocation_info(allocation_time_s, additional_buffer_size);
+  _allocation_rate_s.add(last_mutator_period_old_allocation_rate());
 
   _last_unrestrained_young_size = additional_buffer_size;
 }
--- old/src/hotspot/share/gc/g1/g1IHOPControl.hpp 2020-08-13 04:14:08.635244957 +0000
+++ new/src/hotspot/share/gc/g1/g1IHOPControl.hpp 2020-08-13 04:14:08.479241965 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #ifndef SHARE_GC_G1_G1IHOPCONTROL_HPP
 #define SHARE_GC_G1_G1IHOPCONTROL_HPP
 
+#include "gc/g1/g1OldGenAllocationTracker.hpp"
 #include "memory/allocation.hpp"
 #include "utilities/numberSeq.hpp"
 
@@ -44,12 +45,12 @@
   // Most recent complete mutator allocation period in seconds.
   double _last_allocation_time_s;
 
-  // Amount of bytes allocated during _last_allocation_time_s.
-  size_t _last_allocated_bytes;
-
-  // Initialize an instance with the initial IHOP value in percent. The target
-  // occupancy will be updated at the first heap expansion.
-  G1IHOPControl(double initial_ihop_percent);
+  const G1OldGenAllocationTracker* _old_gen_alloc_tracker;
+
+  // Initialize an instance with the old gen allocation tracker and the
+  // initial IHOP value in percent. The target occupancy will be updated
+  // at the first heap expansion.
+  G1IHOPControl(double ihop_percent, G1OldGenAllocationTracker const* old_gen_alloc_tracker);
 
   // Most recent time from the end of the concurrent start to the start of the first
   // mixed gc.
@@ -70,7 +71,7 @@
   // Together with the target occupancy, this additional buffer should contain the
   // difference between old gen size and total heap size at the start of reclamation,
   // and space required for that reclamation.
-  virtual void update_allocation_info(double allocation_time_s, size_t allocated_bytes, size_t additional_buffer_size);
+  virtual void update_allocation_info(double allocation_time_s, size_t additional_buffer_size);
   // Update the time spent in the mutator beginning from the end of concurrent start to
   // the first mixed gc.
   virtual void update_marking_length(double marking_length_s) = 0;
@@ -88,7 +89,7 @@
  protected:
   double last_marking_length_s() const { return _last_marking_length_s; }
 
  public:
-  G1StaticIHOPControl(double ihop_percent);
+  G1StaticIHOPControl(double ihop_percent, G1OldGenAllocationTracker const* old_gen_alloc_tracker);
 
   size_t get_conc_mark_start_threshold() {
     guarantee(_target_occupancy > 0, "Target occupancy must have been initialized.");
@@ -132,17 +133,22 @@
   // end of marking. This is typically lower than the requested threshold, as the
   // algorithm needs to consider restrictions by the environment.
   size_t actual_target_threshold() const;
+
+  // Calculate the old gen allocation rate from the net survived bytes allocated
+  // in the old generation during the last mutator period.
+  double last_mutator_period_old_allocation_rate() const;
 
  protected:
   virtual double last_marking_length_s() const { return _marking_times_s.last(); }
 
  public:
   G1AdaptiveIHOPControl(double ihop_percent,
+                        G1OldGenAllocationTracker const* old_gen_alloc_tracker,
                         G1Predictions const* predictor,
                         size_t heap_reserve_percent, // The percentage of total heap capacity that should not be tapped into.
                         size_t heap_waste_percent);  // The percentage of the free space in the heap that we think is not usable for allocation.
   virtual size_t get_conc_mark_start_threshold();
 
-  virtual void update_allocation_info(double allocation_time_s, size_t allocated_bytes, size_t additional_buffer_size);
+  virtual void update_allocation_info(double allocation_time_s, size_t additional_buffer_size);
   virtual void update_marking_length(double marking_length_s);
 
   virtual void print();
--- old/src/hotspot/share/gc/g1/g1OldGenAllocationTracker.cpp 2020-08-13 04:14:09.419259988 +0000
+++ new/src/hotspot/share/gc/g1/g1OldGenAllocationTracker.cpp 2020-08-13 04:14:09.263256997 +0000
@@ -24,19 +24,40 @@
 
 #include "precompiled.hpp"
 #include "gc/g1/g1OldGenAllocationTracker.hpp"
+#include "logging/log.hpp"
 
 G1OldGenAllocationTracker::G1OldGenAllocationTracker() :
-  _last_cycle_old_bytes(0),
-  _last_cycle_duration(0.0),
-  _allocated_bytes_since_last_gc(0) {
+  _last_period_old_bytes(0),
+  _last_period_humongous_bytes(0),
+  _humongous_bytes_after_last_gc(0),
+  _humongous_bytes_after_penultimate_gc(0),
+  _allocated_bytes_since_last_gc(0),
+  _allocated_humongous_bytes_since_last_gc(0) {
 }
 
-void G1OldGenAllocationTracker::reset_after_full_gc() {
-  _last_cycle_duration = 0;
-  reset_cycle_after_gc();
+void G1OldGenAllocationTracker::reset_after_gc(size_t humongous_bytes_after_gc) {
+  // Record the last mutator period.
+  _last_period_old_bytes = _allocated_bytes_since_last_gc;
+  _last_period_humongous_bytes = _allocated_humongous_bytes_since_last_gc;
+  _humongous_bytes_after_penultimate_gc = _humongous_bytes_after_last_gc;
+  _humongous_bytes_after_last_gc = humongous_bytes_after_gc;
+  // Reset the running counters.
+  _allocated_bytes_since_last_gc = 0;
+  _allocated_humongous_bytes_since_last_gc = 0;
+  log_debug(gc, alloc, stats)("Old generation allocation in the last mutator period, "
+                              "old gen allocated: " SIZE_FORMAT "B, humongous allocated: " SIZE_FORMAT "B.",
+                              _last_period_old_bytes, _last_period_humongous_bytes);
 }
 
-void G1OldGenAllocationTracker::reset_after_young_gc(double allocation_duration_s) {
-  _last_cycle_duration = allocation_duration_s;
-  reset_cycle_after_gc();
-}
\ No newline at end of file
+size_t G1OldGenAllocationTracker::last_period_net_survived_old_bytes() const {
+  // The humongous bytes freed in the last mutator period are bounded by the humongous
+  // bytes allocated since the last GC. When more humongous bytes survived the current
+  // GC than survived the previous one, deduct that increase.
+  size_t freed_humongous_bytes = _last_period_humongous_bytes;
+
+  if (freed_humongous_bytes > 0 && _humongous_bytes_after_penultimate_gc < _humongous_bytes_after_last_gc) {
+    freed_humongous_bytes -= _humongous_bytes_after_last_gc - _humongous_bytes_after_penultimate_gc;
+  }
+  assert(_last_period_old_bytes >= freed_humongous_bytes, "Allocation rate cannot be negative");
+  return _last_period_old_bytes - freed_humongous_bytes;
+}
--- old/src/hotspot/share/gc/g1/g1OldGenAllocationTracker.hpp 2020-08-13 04:14:10.211275174 +0000
+++ new/src/hotspot/share/gc/g1/g1OldGenAllocationTracker.hpp 2020-08-13 04:14:10.055272183 +0000
@@ -28,34 +28,55 @@
 #include "gc/g1/heapRegion.hpp"
 #include "memory/allocation.hpp"
 
+class G1AdaptiveIHOPControl;
+
 // Track allocation details in the old generation.
 class G1OldGenAllocationTracker : public CHeapObj<mtGC> {
   // New bytes allocated in old gen between the end of the last GC and
+  // the end of the GC before that. This includes humongous object allocation.
+  size_t _last_period_old_bytes;
+  // New bytes allocated in humongous regions between the end of the last GC and
   // the end of the GC before that.
-  size_t _last_cycle_old_bytes;
-  // The number of seconds between the end of the last GC and
-  // the end of the GC before that.
-  double _last_cycle_duration;
+  size_t _last_period_humongous_bytes;
 
-  size_t _allocated_bytes_since_last_gc;
+  size_t _humongous_bytes_after_last_gc;
+  size_t _humongous_bytes_after_penultimate_gc;
 
-  void reset_cycle_after_gc() {
-    _last_cycle_old_bytes = _allocated_bytes_since_last_gc;
-    _allocated_bytes_since_last_gc = 0;
-  }
+  size_t _allocated_bytes_since_last_gc;
+  size_t _allocated_humongous_bytes_since_last_gc;
 
 public:
   G1OldGenAllocationTracker();
 
   // Add the given number of bytes to the total number of allocated bytes in the old gen.
   void add_allocated_bytes_since_last_gc(size_t bytes) { _allocated_bytes_since_last_gc += bytes; }
 
-  size_t last_cycle_old_bytes() { return _last_cycle_old_bytes; }
+  void add_allocated_humongous_bytes_since_last_gc(size_t bytes) {
+    _allocated_humongous_bytes_since_last_gc += bytes;
+    _allocated_bytes_since_last_gc += bytes;
+  }
+
+  // Record a humongous allocation during a collection pause.
+  // In g1CollectedHeap, when a humongous allocation fails, the heap will attempt
+  // to trigger a GC and try to allocate the required bytes during it. These bytes
+  // are then not counted in any mutator period but as bytes that survived the GC.
+  // Otherwise, they would distort our estimate of how many humongous bytes are
+  // freed in a mutator period.
+  void record_collection_pause_humongous_allocation(size_t bytes) {
+    _humongous_bytes_after_last_gc += bytes;
+  }
 
-  double last_cycle_duration() { return _last_cycle_duration; }
+  size_t last_period_old_bytes() const { return _last_period_old_bytes; }
 
   // Reset stats after a collection.
-  void reset_after_full_gc();
-  void reset_after_young_gc(double allocation_duration_s);
+  void reset_after_gc(size_t humongous_bytes_after_gc);
+
+  // This is used by Adaptive IHOP to sample the old gen allocation rate.
+  // Unlike the regular old gen allocation rate, this method accounts for the
+  // humongous objects that can be reclaimed early by young GCs. Since we cannot
+  // track the life cycle of individual humongous objects, we assume that such
+  // objects were all newly allocated and not survivors, unless more were
+  // reclaimed than allocated.
+  size_t last_period_net_survived_old_bytes() const;
 };
 
 #endif // SHARE_VM_GC_G1_G1OLDGENALLOCATIONTRACKER_HPP
\ No newline at end of file
--- old/src/hotspot/share/gc/g1/g1Policy.cpp 2020-08-13 04:14:11.003290359 +0000
+++ new/src/hotspot/share/gc/g1/g1Policy.cpp 2020-08-13 04:14:10.847287368 +0000
@@ -57,7 +57,8 @@
   _analytics(new G1Analytics(&_predictor)),
   _remset_tracker(),
   _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
-  _ihop_control(create_ihop_control(&_predictor)),
+  _old_gen_alloc_tracker(),
+  _ihop_control(create_ihop_control(&_old_gen_alloc_tracker, &_predictor)),
   _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
   _full_collection_start_sec(0.0),
   _young_list_target_length(0),
@@ -72,7 +73,6 @@
   _rs_length(0),
   _rs_length_prediction(0),
   _pending_cards_at_gc_start(0),
-  _old_gen_alloc_tracker(),
   _concurrent_start_to_mixed(),
   _collection_set(NULL),
   _g1h(NULL),
@@ -469,7 +469,7 @@
   update_young_list_max_and_target_length();
   update_rs_length_prediction();
 
-  _old_gen_alloc_tracker.reset_after_full_gc();
+  _old_gen_alloc_tracker.reset_after_gc(_g1h->humongous_regions_count() * HeapRegion::GrainBytes);
 
   record_pause(FullGC, _full_collection_start_sec, end_sec);
 }
@@ -804,9 +804,8 @@
   // predicted target occupancy.
   size_t last_unrestrained_young_length = update_young_list_max_and_target_length();
 
-  _old_gen_alloc_tracker.reset_after_young_gc(app_time_ms / 1000.0);
-  update_ihop_prediction(_old_gen_alloc_tracker.last_cycle_duration(),
-                         _old_gen_alloc_tracker.last_cycle_old_bytes(),
+  _old_gen_alloc_tracker.reset_after_gc(_g1h->humongous_regions_count() * HeapRegion::GrainBytes);
+  update_ihop_prediction(app_time_ms / 1000.0,
                          last_unrestrained_young_length * HeapRegion::GrainBytes,
                          is_young_only_pause(this_pause));
 
@@ -844,19 +843,20 @@
                                       scan_logged_cards_time_goal_ms);
 }
 
-G1IHOPControl* G1Policy::create_ihop_control(const G1Predictions* predictor){
+G1IHOPControl* G1Policy::create_ihop_control(const G1OldGenAllocationTracker* old_gen_alloc_tracker,
+                                             const G1Predictions* predictor) {
   if (G1UseAdaptiveIHOP) {
     return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
+                                     old_gen_alloc_tracker,
                                      predictor,
                                      G1ReservePercent,
                                      G1HeapWastePercent);
   } else {
-    return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent);
+    return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent, old_gen_alloc_tracker);
   }
 }
 
 void G1Policy::update_ihop_prediction(double mutator_time_s,
-                                      size_t mutator_alloc_bytes,
                                       size_t young_gen_size,
                                       bool this_gc_was_young_only) {
   // Always try to update IHOP prediction. Even evacuation failures give information
@@ -885,7 +885,7 @@
   // marking, which makes any prediction useless. This increases the accuracy of the
   // prediction.
   if (this_gc_was_young_only && mutator_time_s > min_valid_time) {
-    _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size);
+    _ihop_control->update_allocation_info(mutator_time_s, young_gen_size);
     report = true;
   }
--- old/src/hotspot/share/gc/g1/g1Policy.hpp 2020-08-13 04:14:11.807305775 +0000
+++ new/src/hotspot/share/gc/g1/g1Policy.hpp 2020-08-13 04:14:11.647302708 +0000
@@ -56,10 +56,10 @@
 
 class G1Policy: public CHeapObj<mtGC> {
  private:
 
-  static G1IHOPControl* create_ihop_control(const G1Predictions* predictor);
+  static G1IHOPControl* create_ihop_control(const G1OldGenAllocationTracker* old_gen_alloc_tracker,
+                                            const G1Predictions* predictor);
   // Update the IHOP control with necessary statistics.
   void update_ihop_prediction(double mutator_time_s,
-                              size_t mutator_alloc_bytes,
                               size_t young_gen_size,
                               bool this_gc_was_young_only);
   void report_ihop_statistics();
@@ -68,6 +68,10 @@
   G1Analytics* _analytics;
   G1RemSetTrackingPolicy _remset_tracker;
   G1MMUTracker* _mmu_tracker;
+
+  // Tracking the allocation in the old generation between
+  // two GCs.
+  G1OldGenAllocationTracker _old_gen_alloc_tracker;
   G1IHOPControl* _ihop_control;
   GCPolicyCounters* _policy_counters;
 
@@ -101,10 +105,6 @@
 
   size_t _pending_cards_at_gc_start;
 
-  // Tracking the allocation in the old generation between
-  // two GCs.
-  G1OldGenAllocationTracker _old_gen_alloc_tracker;
-
   G1ConcurrentStartToMixedTimeTracker _concurrent_start_to_mixed;
 
   bool should_update_surv_rate_group_predictors() {
--- old/test/hotspot/gtest/gc/g1/test_g1IHOPControl.cpp 2020-08-13 04:14:12.595320884 +0000
+++ new/test/hotspot/gtest/gc/g1/test_g1IHOPControl.cpp 2020-08-13 04:14:12.439317893 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,14 +24,40 @@
 #include "precompiled.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1IHOPControl.hpp"
+#include "gc/g1/g1OldGenAllocationTracker.hpp"
 #include "gc/g1/g1Predictions.hpp"
 #include "unittest.hpp"
 
-static void test_update(G1IHOPControl* ctrl, double alloc_time,
-                        size_t alloc_amount, size_t young_size,
-                        double mark_time) {
+static void test_update_allocation_tracker(G1OldGenAllocationTracker* alloc_tracker,
+                                           size_t alloc_amount) {
+  alloc_tracker->add_allocated_bytes_since_last_gc(alloc_amount);
+  alloc_tracker->reset_after_gc((size_t)0);
+}
+
+static void test_update(G1IHOPControl* ctrl,
+                        G1OldGenAllocationTracker* alloc_tracker,
+                        double alloc_time, size_t alloc_amount,
+                        size_t young_size, double mark_time) {
+  test_update_allocation_tracker(alloc_tracker, alloc_amount);
   for (int i = 0; i < 100; i++) {
-    ctrl->update_allocation_info(alloc_time, alloc_amount, young_size);
+    ctrl->update_allocation_info(alloc_time, young_size);
+    ctrl->update_marking_length(mark_time);
+  }
+}
+
+static void test_update_humongous(G1IHOPControl* ctrl,
+                                  G1OldGenAllocationTracker* alloc_tracker,
+                                  double alloc_time,
+                                  size_t alloc_amount_non_hum,
+                                  size_t alloc_amount_hum,
+                                  size_t humongous_bytes_after_last_gc,
+                                  size_t young_size,
+                                  double mark_time) {
+  alloc_tracker->add_allocated_bytes_since_last_gc(alloc_amount_non_hum);
+  alloc_tracker->add_allocated_humongous_bytes_since_last_gc(alloc_amount_hum);
+  alloc_tracker->reset_after_gc(humongous_bytes_after_last_gc);
+  for (int i = 0; i < 100; i++) {
+    ctrl->update_allocation_info(alloc_time, young_size);
     ctrl->update_marking_length(mark_time);
   }
 }
@@ -45,13 +71,15 @@
 
   const size_t initial_ihop = 45;
 
-  G1StaticIHOPControl ctrl(initial_ihop);
+  G1OldGenAllocationTracker alloc_tracker;
+  G1StaticIHOPControl ctrl(initial_ihop, &alloc_tracker);
   ctrl.update_target_occupancy(100);
   size_t threshold = ctrl.get_conc_mark_start_threshold();
   EXPECT_EQ(initial_ihop, threshold);
 
-  ctrl.update_allocation_info(100.0, 100, 100);
+  test_update_allocation_tracker(&alloc_tracker, 100);
+  ctrl.update_allocation_info(100.0, 100);
   threshold = ctrl.get_conc_mark_start_threshold();
   EXPECT_EQ(initial_ihop, threshold);
 
@@ -60,12 +88,12 @@
   EXPECT_EQ(initial_ihop, threshold);
 
   // Whatever we pass, the IHOP value must stay the same.
-  test_update(&ctrl, 2, 10, 10, 3);
+  test_update(&ctrl, &alloc_tracker, 2, 10, 10, 3);
   threshold = ctrl.get_conc_mark_start_threshold();
 
   EXPECT_EQ(initial_ihop, threshold);
 
-  test_update(&ctrl, 12, 10, 10, 3);
+  test_update(&ctrl, &alloc_tracker, 12, 10, 10, 3);
   threshold = ctrl.get_conc_mark_start_threshold();
 
   EXPECT_EQ(initial_ihop, threshold);
 
@@ -85,8 +113,9 @@
   // The final IHOP value is always
   // target_size - (young_size + alloc_amount/alloc_time * marking_time)
 
+  G1OldGenAllocationTracker alloc_tracker;
   G1Predictions pred(0.95);
-  G1AdaptiveIHOPControl ctrl(initial_threshold, &pred, 0, 0);
+  G1AdaptiveIHOPControl ctrl(initial_threshold, &alloc_tracker, &pred, 0, 0);
   ctrl.update_target_occupancy(target_size);
 
   // First "load".
@@ -102,7 +131,8 @@
   EXPECT_EQ(initial_threshold, threshold);
 
   for (size_t i = 0; i < G1AdaptiveIHOPNumInitialSamples - 1; i++) {
-    ctrl.update_allocation_info(alloc_time1, alloc_amount1, young_size);
+    test_update_allocation_tracker(&alloc_tracker, alloc_amount1);
+    ctrl.update_allocation_info(alloc_time1, young_size);
     ctrl.update_marking_length(marking_time1);
     // Not enough data yet.
     threshold = ctrl.get_conc_mark_start_threshold();
 
     ASSERT_EQ(initial_threshold, threshold) << "on step " << i;
   }
 
-  test_update(&ctrl, alloc_time1, alloc_amount1, young_size, marking_time1);
+  test_update(&ctrl, &alloc_tracker, alloc_time1, alloc_amount1, young_size, marking_time1);
 
   threshold = ctrl.get_conc_mark_start_threshold();
 
@@ -123,7 +153,7 @@
   const size_t settled_ihop2 = target_size - (young_size + alloc_amount2 / alloc_time2 * marking_time2);
 
-  test_update(&ctrl, alloc_time2, alloc_amount2, young_size, marking_time2);
+  test_update(&ctrl, &alloc_tracker, alloc_time2, alloc_amount2, young_size, marking_time2);
 
   threshold = ctrl.get_conc_mark_start_threshold();
 
@@ -135,15 +165,82 @@
   const size_t marking_time3 = 2;
   const size_t settled_ihop3 = 0;
 
-  test_update(&ctrl, alloc_time3, alloc_amount3, young_size, marking_time3);
+  test_update(&ctrl, &alloc_tracker, alloc_time3, alloc_amount3, young_size, marking_time3);
   threshold = ctrl.get_conc_mark_start_threshold();
 
   EXPECT_EQ(settled_ihop3, threshold);
 
   // And back to some arbitrary value.
-  test_update(&ctrl, alloc_time2, alloc_amount2, young_size, marking_time2);
+  test_update(&ctrl, &alloc_tracker, alloc_time2, alloc_amount2, young_size, marking_time2);
   threshold = ctrl.get_conc_mark_start_threshold();
 
   EXPECT_GT(threshold, settled_ihop3);
 }
+
+TEST_VM(G1AdaptiveIHOPControl, humongous) {
+  // Test requires G1
+  if (!UseG1GC) {
+    return;
+  }
+
+  const size_t initial_threshold = 45;
+  const size_t young_size = 10;
+  const size_t target_size = 100;
+  const double duration = 10.0;
+  const size_t marking_time = 2;
+
+  G1OldGenAllocationTracker alloc_tracker;
+  G1Predictions pred(0.95);
+  G1AdaptiveIHOPControl ctrl(initial_threshold, &alloc_tracker, &pred, 0, 0);
+  ctrl.update_target_occupancy(target_size);
+
+  size_t old_bytes = 100;
+  size_t humongous_bytes = 200;
+  size_t humongous_bytes_after_gc = 150;
+  size_t humongous_bytes_after_last_gc = 50;
+  // Load 1
+  test_update_humongous(&ctrl, &alloc_tracker, duration, 0, humongous_bytes,
+                        humongous_bytes_after_last_gc, young_size, marking_time);
+  // Test threshold
+  size_t threshold;
+  threshold = ctrl.get_conc_mark_start_threshold();
+  // Adjusted allocated bytes:
+  //   Total bytes: humongous_bytes
+  //   Freed hum bytes: humongous_bytes - humongous_bytes_after_last_gc
+  double alloc_rate = humongous_bytes_after_last_gc / duration;
+  size_t target_threshold = target_size - (size_t)(young_size + alloc_rate * marking_time);
+
+  EXPECT_EQ(threshold, target_threshold);
+
+  // Load 2
+  G1AdaptiveIHOPControl ctrl2(initial_threshold, &alloc_tracker, &pred, 0, 0);
+  ctrl2.update_target_occupancy(target_size);
+  test_update_humongous(&ctrl2, &alloc_tracker, duration, old_bytes, humongous_bytes,
+                        humongous_bytes_after_gc, young_size, marking_time);
+  threshold = ctrl2.get_conc_mark_start_threshold();
+  // Adjusted allocated bytes:
+  //   Total bytes: old_bytes + humongous_bytes
+  //   Freed hum bytes: humongous_bytes - (humongous_bytes_after_gc - humongous_bytes_after_last_gc)
+  alloc_rate = (old_bytes + (humongous_bytes_after_gc - humongous_bytes_after_last_gc)) / duration;
+  target_threshold = target_size - (size_t)(young_size + alloc_rate * marking_time);
+
+  EXPECT_EQ(threshold, target_threshold);
+
+  // Load 3
+  humongous_bytes_after_last_gc = humongous_bytes_after_gc;
+  humongous_bytes_after_gc = 50;
+  G1AdaptiveIHOPControl ctrl3(initial_threshold, &alloc_tracker, &pred, 0, 0);
+  ctrl3.update_target_occupancy(target_size);
+  test_update_humongous(&ctrl3, &alloc_tracker, duration, old_bytes, humongous_bytes,
+                        humongous_bytes_after_gc, young_size, marking_time);
+  threshold = ctrl3.get_conc_mark_start_threshold();
+  // Adjusted allocated bytes:
+  //   All humongous are cleaned up since humongous_bytes_after_gc < humongous_bytes_after_last_gc
+  //   Total bytes: old_bytes + humongous_bytes
+  //   Freed hum bytes: humongous_bytes
+  alloc_rate = old_bytes / duration;
+  target_threshold = target_size - (size_t)(young_size + alloc_rate * marking_time);
+
+  EXPECT_EQ(threshold, target_threshold);
+}
\ No newline at end of file
--- /dev/null 2020-06-16 16:13:00.016000000 +0000
+++ new/test/hotspot/gtest/gc/g1/test_g1OldGenAllocationTracker.cpp 2020-08-13 04:14:13.227333002 +0000
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2020, Amazon.com, Inc. or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1OldGenAllocationTracker.hpp"
+#include "unittest.hpp"
+
+TEST_VM(G1OldGenAllocationTracker, simple) {
+  G1OldGenAllocationTracker alloc_tracker;
+  size_t old_bytes = 100;
+  size_t humongous_bytes = 200;
+  alloc_tracker.add_allocated_bytes_since_last_gc(old_bytes);
+  alloc_tracker.add_allocated_humongous_bytes_since_last_gc(humongous_bytes);
+  alloc_tracker.reset_after_gc((size_t)0);
+
+  EXPECT_EQ(alloc_tracker.last_period_old_bytes(), old_bytes + humongous_bytes);
+}
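To make the new accounting easier to follow, here is a small standalone C++ sketch. It is illustrative only and not part of the patch: "TrackerModel" and every other name in it are ad hoc, and it mirrors just the arithmetic of G1OldGenAllocationTracker::reset_after_gc() and last_period_net_survived_old_bytes(), replaying the "Load 2" numbers from the new gtest above (100B of regular old-gen allocation, 200B of humongous allocation, live humongous space growing from 50B to 150B across the GC, a 10s mutator period, and a 2s marking time).

// Illustrative sketch only -- assumes nothing beyond the arithmetic visible in the patch.
#include <cstddef>
#include <cstdio>

struct TrackerModel {
  size_t last_period_old_bytes = 0;         // all old-gen bytes allocated in the last mutator period
  size_t last_period_humongous_bytes = 0;   // humongous subset of the above
  size_t humongous_bytes_after_last_gc = 0;
  size_t humongous_bytes_after_penultimate_gc = 0;
  size_t allocated_bytes_since_last_gc = 0;
  size_t allocated_humongous_bytes_since_last_gc = 0;

  void add_old(size_t bytes) { allocated_bytes_since_last_gc += bytes; }

  void add_humongous(size_t bytes) {
    allocated_humongous_bytes_since_last_gc += bytes;
    allocated_bytes_since_last_gc += bytes;
  }

  // Mirrors reset_after_gc(): snapshot the finished mutator period, then reset the counters.
  void reset_after_gc(size_t humongous_bytes_after_gc) {
    last_period_old_bytes = allocated_bytes_since_last_gc;
    last_period_humongous_bytes = allocated_humongous_bytes_since_last_gc;
    humongous_bytes_after_penultimate_gc = humongous_bytes_after_last_gc;
    humongous_bytes_after_last_gc = humongous_bytes_after_gc;
    allocated_bytes_since_last_gc = 0;
    allocated_humongous_bytes_since_last_gc = 0;
  }

  // Mirrors last_period_net_survived_old_bytes(): humongous bytes allocated in the period,
  // minus any growth of live humongous space across the GC, are treated as reclaimed early.
  size_t net_survived_old_bytes() const {
    size_t freed_humongous = last_period_humongous_bytes;
    if (freed_humongous > 0 && humongous_bytes_after_penultimate_gc < humongous_bytes_after_last_gc) {
      freed_humongous -= humongous_bytes_after_last_gc - humongous_bytes_after_penultimate_gc;
    }
    return last_period_old_bytes - freed_humongous;
  }
};

int main() {
  TrackerModel t;
  t.reset_after_gc(50);   // the previous GC left 50B of live humongous space
  t.add_old(100);         // regular old-gen allocation during the mutator period
  t.add_humongous(200);   // humongous allocation during the same period
  t.reset_after_gc(150);  // 100B of the 200B humongous allocation is still live after this GC

  size_t net = t.net_survived_old_bytes();  // 300 - (200 - (150 - 50)) = 200 bytes
  double rate = net / 10.0;                 // 10s mutator period -> 20 bytes/s
  // Same formula the gtest checks (with zero reserve/waste): target_size - (young_size + rate * marking_time).
  size_t threshold = 100 - (size_t)(10 + rate * 2);
  printf("net survived: %zuB, rate: %.1fB/s, expected threshold: %zu\n", net, rate, threshold);
  return 0;
}

The point of the subtraction is that humongous bytes allocated in a mutator period and already gone at the next GC are treated as reclaimed early, so they no longer inflate the old-gen allocation rate sample that feeds the adaptive IHOP threshold.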