--- old/src/share/vm/gc/g1/g1CollectedHeap.cpp 2015-11-06 11:34:04.436277166 +0100 +++ new/src/share/vm/gc/g1/g1CollectedHeap.cpp 2015-11-06 11:34:04.348274599 +0100 @@ -427,6 +427,11 @@ return new_obj; } +size_t G1CollectedHeap::humongous_obj_size_in_regions(size_t word_size) { + assert(is_humongous(word_size), "Object of size " SIZE_FORMAT " must be humongous here", word_size); + return align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords; +} + // If could fit into free regions w/o expansion, try. // Otherwise, if can expand, do so. // Otherwise, if using ex regions might help, try with ex given back. @@ -436,7 +441,7 @@ verify_region_sets_optional(); uint first = G1_NO_HRM_INDEX; - uint obj_regions = (uint)(align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords); + uint obj_regions = (uint) humongous_obj_size_in_regions(word_size); if (obj_regions == 1) { // Only one region to allocate, try to use a fast path by directly allocating @@ -1036,6 +1041,7 @@ // collection hoping that there's enough space in the heap. result = humongous_obj_allocate(word_size, AllocationContext::current()); if (result != NULL) { + g1_policy()->add_last_old_allocated_bytes(humongous_obj_size_in_regions(word_size) * HeapRegion::GrainBytes); return result; } @@ -5283,6 +5289,8 @@ } void G1CollectedHeap::record_obj_copy_mem_stats() { + g1_policy()->add_last_old_allocated_bytes(_old_evac_stats.allocated() * HeapWordSize); + _gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats), create_g1_evac_summary(&_old_evac_stats)); } --- old/src/share/vm/gc/g1/g1CollectedHeap.hpp 2015-11-06 11:34:05.025294348 +0100 +++ new/src/share/vm/gc/g1/g1CollectedHeap.hpp 2015-11-06 11:34:04.941291898 +0100 @@ -1367,6 +1367,10 @@ return (region_size / 2); } + // Returns the number of regions the humongous object of the given word size + // covers. + static size_t humongous_obj_size_in_regions(size_t word_size); + // Update mod union table with the set of dirty cards. void updateModUnion(); --- old/src/share/vm/gc/g1/g1CollectorPolicy.cpp 2015-11-06 11:34:05.569310217 +0100 +++ new/src/share/vm/gc/g1/g1CollectorPolicy.cpp 2015-11-06 11:34:05.484307738 +0100 @@ -28,6 +28,7 @@ #include "gc/g1/concurrentMarkThread.inline.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1CollectorPolicy.hpp" +#include "gc/g1/g1IHOPControl.hpp" #include "gc/g1/g1ErgoVerbose.hpp" #include "gc/g1/g1GCPhaseTimes.hpp" #include "gc/g1/g1Log.hpp" @@ -148,7 +149,11 @@ _recorded_survivor_tail(NULL), _survivors_age_table(true), - _gc_overhead_perc(0.0) { + _gc_overhead_perc(0.0), + + _last_old_allocated_bytes(0), + _ihop_control(NULL), + _initial_mark_to_mixed() { // SurvRateGroups below must be initialized after the predictor because they // indirectly use it through this object passed to their constructor. 
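The humongous_obj_size_in_regions() hunk above is a ceiling division of the object word size by the region size in words; the caller in the allocation path multiplies the result by HeapRegion::GrainBytes and passes it to add_last_old_allocated_bytes() so humongous allocations count as old-gen allocation for the IHOP control. A minimal standalone sketch of that arithmetic, not part of the patch, assuming an illustrative 1 MB region (131072 words) and made-up names:

#include <cassert>
#include <cstddef>

static const size_t kGrainWords = 131072;  // assumed region size in words (1 MB regions, 8-byte words)

// Same result as align_size_up_(word_size, kGrainWords) / kGrainWords for a
// power-of-two region size: round the object up to a whole number of regions.
static size_t humongous_size_in_regions(size_t word_size) {
  return (word_size + kGrainWords - 1) / kGrainWords;
}

int main() {
  assert(humongous_size_in_regions(kGrainWords)     == 1);  // exactly one region
  assert(humongous_size_in_regions(kGrainWords + 1) == 2);  // one word over spills into a second region
  return 0;
}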
@@ -288,6 +293,12 @@ _collectionSetChooser = new CollectionSetChooser(); } +G1CollectorPolicy::~G1CollectorPolicy() { + if (_ihop_control != NULL) { + delete _ihop_control; + } +} + double G1CollectorPolicy::get_new_prediction(TruncatedSeq const* seq) const { return _predictor.get_new_prediction(seq); } @@ -317,6 +328,8 @@ if (max_young_size != MaxNewSize) { FLAG_SET_ERGO(size_t, MaxNewSize, max_young_size); } + + _ihop_control = create_ihop_control(); } G1CollectorState* G1CollectorPolicy::collector_state() const { return _g1->collector_state(); } @@ -511,24 +524,24 @@ return _young_gen_sizer->max_desired_young_length(); } -void G1CollectorPolicy::update_young_list_max_and_target_length() { - update_young_list_max_and_target_length(get_new_prediction(_rs_lengths_seq)); +void G1CollectorPolicy::update_young_list_max_and_target_length(size_t* unbounded_target_length) { + update_young_list_max_and_target_length(get_new_prediction(_rs_lengths_seq), unbounded_target_length); } -void G1CollectorPolicy::update_young_list_max_and_target_length(size_t rs_lengths) { - update_young_list_target_length(rs_lengths); +void G1CollectorPolicy::update_young_list_max_and_target_length(size_t rs_lengths, size_t* unbounded_target_length) { + update_young_list_target_length(rs_lengths, unbounded_target_length); update_max_gc_locker_expansion(); } -void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) { - _young_list_target_length = bounded_young_list_target_length(rs_lengths); +void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths, size_t* unbounded_target_length) { + _young_list_target_length = bounded_young_list_target_length(rs_lengths, unbounded_target_length); } void G1CollectorPolicy::update_young_list_target_length() { update_young_list_target_length(get_new_prediction(_rs_lengths_seq)); } -uint G1CollectorPolicy::bounded_young_list_target_length(size_t rs_lengths) const { +uint G1CollectorPolicy::bounded_young_list_target_length(size_t rs_lengths, size_t* unbounded_target_length) const { // Calculate the absolute and desired min bounds. // This is how many young regions we already have (currently: the survivors). @@ -541,15 +554,7 @@ desired_min_length = MAX2(desired_min_length, absolute_min_length); // Calculate the absolute and desired max bounds. - // We will try our best not to "eat" into the reserve. - uint absolute_max_length = 0; - if (_free_regions_at_end_of_collection > _reserve_regions) { - absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions; - } uint desired_max_length = calculate_young_list_desired_max_length(); - if (desired_max_length > absolute_max_length) { - desired_max_length = absolute_max_length; - } uint young_list_target_length = 0; if (adaptive_young_list_length()) { @@ -570,6 +575,19 @@ young_list_target_length = _young_list_fixed_length; } + if (unbounded_target_length != NULL) { + *unbounded_target_length = young_list_target_length; + } + + // We will try our best not to "eat" into the reserve. + uint absolute_max_length = 0; + if (_free_regions_at_end_of_collection > _reserve_regions) { + absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions; + } + if (desired_max_length > absolute_max_length) { + desired_max_length = absolute_max_length; + } + // Make sure we don't go over the desired max length, nor under the // desired min length. In case they clash, desired_min_length wins // which is why that test is second. 
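The reordering in bounded_young_list_target_length() above exists so the target length can be captured before the heap-reserve cap is applied; the IHOP control wants the young gen length the policy would pick if it were not constrained by the reserve. A simplified standalone sketch of the resulting order of operations (the names YoungTarget and bound_young_target are made up; this is not the HotSpot code):

#include <algorithm>
#include <cstddef>

struct YoungTarget {
  size_t bounded;    // length the young gen is actually sized to
  size_t unbounded;  // length the policy wanted before the reserve cap
};

static YoungTarget bound_young_target(size_t predicted, size_t desired_min, size_t desired_max,
                                      size_t free_regions, size_t reserve_regions) {
  YoungTarget t;
  t.unbounded = predicted;  // recorded before the reserve is taken into account (new behavior)
  // Try our best not to eat into the reserve: cap the desired max by the regions outside it.
  size_t absolute_max = free_regions > reserve_regions ? free_regions - reserve_regions : 0;
  size_t max_len = std::min(desired_max, absolute_max);
  // Clamp to [desired_min, max_len]; the min is applied last, so it wins on a clash.
  t.bounded = std::max(std::min(predicted, max_len), desired_min);
  return t;
}

int main() {
  // Example: policy wants 120 regions, 100 free, 20 reserved, desired bounds [10, 150].
  YoungTarget t = bound_young_target(120, 10, 150, 100, 20);
  // t.unbounded == 120, t.bounded == 80 (capped by the 100 - 20 regions outside the reserve)
  return t.bounded == 80 && t.unbounded == 120 ? 0 : 1;
}

In the patch, the captured unbounded length later reaches update_ihop_statistics() as last_unrestrained_young_length * HeapRegion::GrainBytes.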
@@ -827,6 +845,10 @@ update_young_list_max_and_target_length(); update_rs_lengths_prediction(); _collectionSetChooser->clear(); + + _last_old_allocated_bytes = 0; + + record_pause(FullGC, _full_collection_start_sec, end_sec); } void G1CollectorPolicy::record_stop_world_start() { @@ -884,7 +906,7 @@ _cur_mark_stop_world_time_ms += elapsed_time_ms; _prev_collection_pause_end_ms += elapsed_time_ms; - _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec); + record_pause(Remark, _mark_remark_start_sec, end_time_sec); } void G1CollectorPolicy::record_concurrent_mark_cleanup_start() { @@ -895,6 +917,10 @@ bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc", "skip last young-only gc"); collector_state()->set_last_young_gc(should_continue_with_reclaim); + // We abort the marking phase. + if (!should_continue_with_reclaim) { + abort_time_to_mixed_tracking(); + } collector_state()->set_in_marking_window(false); } @@ -941,12 +967,13 @@ return false; } - size_t marking_initiating_used_threshold = - (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent; + size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold(); + size_t cur_used_bytes = _g1->non_young_capacity_bytes(); size_t alloc_byte_size = alloc_word_size * HeapWordSize; + size_t marking_request_bytes = cur_used_bytes + alloc_byte_size; - if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) { + if (marking_request_bytes > marking_initiating_used_threshold) { if (collector_state()->gcs_are_young() && !collector_state()->last_young_gc()) { ergo_verbose5(ErgoConcCycles, "request concurrent cycle initiation", @@ -958,7 +985,7 @@ cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, - (double) InitiatingHeapOccupancyPercent, + (double) marking_initiating_used_threshold / _g1->capacity() * 100, source); return true; } else { @@ -987,8 +1014,6 @@ double end_time_sec = os::elapsedTime(); assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(), "otherwise, the subtraction below does not make sense"); - size_t rs_size = - _cur_collection_pause_used_regions_at_start - cset_region_length(); size_t cur_used_bytes = _g1->used(); assert(cur_used_bytes == _g1->recalculate_used(), "It should!"); bool last_pause_included_initial_mark = false; @@ -1002,6 +1027,8 @@ } #endif // PRODUCT + record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec); + last_pause_included_initial_mark = collector_state()->during_initial_mark_pause(); if (last_pause_included_initial_mark) { record_concurrent_mark_init_end(0.0); @@ -1009,12 +1036,12 @@ maybe_start_marking(); } - _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0, end_time_sec); + double app_time_ms = 1.0; if (update_stats) { _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times()); // this is where we update the allocation rate of the application - double app_time_ms = + app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms); if (app_time_ms < MIN_TIMER_GRANULARITY) { // This usually happens due to the timer not having the required @@ -1079,6 +1106,9 @@ if (next_gc_should_be_mixed("start mixed GCs", "do not start mixed GCs")) { collector_state()->set_gcs_are_young(false); + } else { + // We aborted the mixed GC phase early. 
+ abort_time_to_mixed_tracking(); } collector_state()->set_last_young_gc(false); @@ -1087,7 +1117,6 @@ if (!collector_state()->last_gc_was_young()) { // This is a mixed GC. Here we decide whether to continue doing // mixed GCs or not. - if (!next_gc_should_be_mixed("continue mixed GCs", "do not continue mixed GCs")) { collector_state()->set_gcs_are_young(true); @@ -1179,9 +1208,30 @@ collector_state()->set_in_marking_window(new_in_marking_window); collector_state()->set_in_marking_window_im(new_in_marking_window_im); _free_regions_at_end_of_collection = _g1->num_free_regions(); - update_young_list_max_and_target_length(); + // IHOP control wants to know the expected young gen length if it were not + // restrained by the heap reserve. Using the actual length would make the + // prediction too small and the limit the young gen every time we get to the + // predicted target occupancy. + size_t last_unrestrained_young_length = 0; + update_young_list_max_and_target_length(&last_unrestrained_young_length); update_rs_lengths_prediction(); + double marking_to_mixed_time = -1.0; + if (!collector_state()->last_gc_was_young() && _initial_mark_to_mixed.has_result()) { + marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time(); + assert(marking_to_mixed_time > 0.0, + "Initial mark to mixed time must be larger than zero but is %.3f", + marking_to_mixed_time); + } + // Only update IHOP information on regular GCs. + if (update_stats) { + update_ihop_statistics(marking_to_mixed_time, + app_time_ms / 1000.0, + _last_old_allocated_bytes, + last_unrestrained_young_length * HeapRegion::GrainBytes); + } + _last_old_allocated_bytes = 0; + // Note that _mmu_tracker->max_gc_time() returns the time in seconds. double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0; @@ -1207,6 +1257,44 @@ _collectionSetChooser->verify(); } +G1IHOPControl* G1CollectorPolicy::create_ihop_control() const { + return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent, + G1CollectedHeap::heap()->max_capacity()); +} + +void G1CollectorPolicy::update_ihop_statistics(double marking_time, + double mutator_time_s, + size_t mutator_alloc_bytes, + size_t young_gen_size) { + bool report = false; + + // To avoid using really small times that may be caused by e.g. back-to-back gcs + // we filter them out. + double const min_valid_time = 1e-6; + + if (marking_time > min_valid_time) { + _ihop_control->update_time_to_mixed(marking_time); + report = true; + } + + // As an approximation for the young gc promotion rates during marking we use + // all of them. In many applications there are only a few if any young gcs during + // marking, which makes any prediction useless. This increases the accuracy of the + // prediction. 
+ if (collector_state()->last_gc_was_young() && mutator_time_s > min_valid_time) { + _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size); + report = true; + } + + if (report) { + report_ihop_statistics(); + } +} + +void G1CollectorPolicy::report_ihop_statistics() { + _ihop_control->print(); +} + #define EXT_SIZE_FORMAT "%.1f%s" #define EXT_SIZE_PARAMS(bytes) \ byte_size_in_proper_unit((double)(bytes)), \ @@ -1719,8 +1807,7 @@ return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size); } -void -G1CollectorPolicy::record_concurrent_mark_cleanup_end() { +void G1CollectorPolicy::record_concurrent_mark_cleanup_end() { _collectionSetChooser->clear(); WorkGang* workers = _g1->workers(); @@ -1739,7 +1826,8 @@ _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms); _cur_mark_stop_world_time_ms += elapsed_time_ms; _prev_collection_pause_end_ms += elapsed_time_ms; - _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec); + + record_pause(Cleanup, _mark_cleanup_start_sec, end_sec); } // Add the heap region at the head of the non-incremental collection set @@ -1955,6 +2043,59 @@ } } +G1CollectorPolicy::PauseKind G1CollectorPolicy::young_gc_pause_kind() const { + assert(!collector_state()->full_collection(), "must be"); + if (collector_state()->during_initial_mark_pause()) { + assert(collector_state()->last_gc_was_young(), "must be"); + assert(!collector_state()->last_young_gc(), "must be"); + return InitialMarkGC; + } else if (collector_state()->last_young_gc()) { + assert(!collector_state()->during_initial_mark_pause(), "must be"); + assert(collector_state()->last_gc_was_young(), "must be"); + return LastYoungGC; + } else if (!collector_state()->last_gc_was_young()) { + assert(!collector_state()->during_initial_mark_pause(), "must be"); + assert(!collector_state()->last_young_gc(), "must be"); + return MixedGC; + } else { + assert(collector_state()->last_gc_was_young(), "must be"); + assert(!collector_state()->during_initial_mark_pause(), "must be"); + assert(!collector_state()->last_young_gc(), "must be"); + return YoungOnlyGC; + } +} + +void G1CollectorPolicy::record_pause(PauseKind kind, double start, double end) { + // Manage the MMU tracker. For some reason it ignores Full GCs. + if (kind != FullGC) { + _mmu_tracker->add_pause(start, end); + } + // Manage the mutator time tracking from initial mark to first mixed gc. + switch (kind) { + case FullGC: + abort_time_to_mixed_tracking(); + break; + case Cleanup: + case Remark: + case YoungOnlyGC: + case LastYoungGC: + _initial_mark_to_mixed.add_pause(end - start); + break; + case InitialMarkGC: + _initial_mark_to_mixed.record_initial_mark_end(end); + break; + case MixedGC: + _initial_mark_to_mixed.record_mixed_gc_start(start); + break; + default: + ShouldNotReachHere(); + } +} + +void G1CollectorPolicy::abort_time_to_mixed_tracking() { + _initial_mark_to_mixed.reset(); +} + bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str, const char* false_action_str) const { CollectionSetChooser* cset_chooser = _collectionSetChooser; --- old/src/share/vm/gc/g1/g1CollectorPolicy.hpp 2015-11-06 11:34:06.089325386 +0100 +++ new/src/share/vm/gc/g1/g1CollectorPolicy.hpp 2015-11-06 11:34:06.006322965 +0100 @@ -40,6 +40,7 @@ class HeapRegion; class CollectionSetChooser; +class G1IHOPControl; // TraceYoungGenTime collects data on _both_ young and mixed evacuation pauses // (the latter may contain non-young regions - i.e. 
regions that are @@ -163,6 +164,15 @@ class G1CollectorPolicy: public CollectorPolicy { private: + G1IHOPControl* _ihop_control; + + G1IHOPControl* create_ihop_control() const; + void update_ihop_statistics(double marking_to_mixed_time, + double mutator_time_s, + size_t mutator_alloc_bytes, + size_t young_gen_size); + void report_ihop_statistics(); + G1Predictions _predictor; double get_new_prediction(TruncatedSeq const* seq) const; @@ -271,9 +281,67 @@ size_t _pending_cards; + // The amount of allocated bytes in old gen during the last mutator and the following + // young GC phase. + size_t _last_old_allocated_bytes; + + // Used to track time from the end of initial mark to the first mixed GC. + class InitialMarkToMixedTimeTracker { + private: + bool _active; + double _initial_mark_end_time; + double _mixed_start_time; + double _total_pause_time; + + double wall_time() const { + return _mixed_start_time - _initial_mark_end_time; + } + public: + InitialMarkToMixedTimeTracker() { reset(); } + + void record_initial_mark_end(double end_time) { + assert(!_active, "Initial mark out of order."); + _initial_mark_end_time = end_time; + _active = true; + } + + void record_mixed_gc_start(double start_time) { + if (_active) { + _mixed_start_time = start_time; + _active = false; + } + } + + double last_marking_time() { + assert(has_result(), "Do not have all measurements yet."); + double result = (_mixed_start_time - _initial_mark_end_time) - _total_pause_time; + reset(); + return result; + } + + void reset() { + _active = false; + _total_pause_time = 0.0; + _initial_mark_end_time = -1.0; + _mixed_start_time = -1.0; + } + + void add_pause(double time) { + if (_active) { + _total_pause_time += time; + } + } + + bool has_result() const { return _mixed_start_time > 0.0 && _initial_mark_end_time > 0.0; } + }; + + InitialMarkToMixedTimeTracker _initial_mark_to_mixed; public: G1Predictions& predictor() { return _predictor; } + // Add the given number of bytes to the total number of allocated bytes in the old gen. + void add_last_old_allocated_bytes(size_t bytes) { _last_old_allocated_bytes += bytes; } + // Accessors void set_region_eden(HeapRegion* hr, int young_index_in_cset) { @@ -473,8 +541,8 @@ double _mark_remark_start_sec; double _mark_cleanup_start_sec; - void update_young_list_max_and_target_length(); - void update_young_list_max_and_target_length(size_t rs_lengths); + void update_young_list_max_and_target_length(size_t* unbounded_target_length = NULL); + void update_young_list_max_and_target_length(size_t rs_lengths, size_t* unbounded_target_length = NULL); // Update the young list target length either by setting it to the // desired fixed value or by calculating it using G1's pause @@ -482,7 +550,7 @@ // the RS lengths using the prediction model, otherwise use the // given rs_lengths as the prediction. void update_young_list_target_length(); - void update_young_list_target_length(size_t rs_lengths); + void update_young_list_target_length(size_t rs_lengths, size_t* unbounded_target_length = NULL); // Calculate and return the minimum desired young list target // length. 
This is the minimum desired young list length according @@ -505,7 +573,7 @@ uint desired_min_length, uint desired_max_length) const; - uint bounded_young_list_target_length(size_t rs_lengths) const; + uint bounded_young_list_target_length(size_t rs_lengths, size_t* unbounded_target_length = NULL) const; void update_rs_lengths_prediction(); void update_rs_lengths_prediction(size_t prediction); @@ -536,10 +604,30 @@ // Sets up marking if proper conditions are met. void maybe_start_marking(); + + // The kind of STW pause. + enum PauseKind { + FullGC, + YoungOnlyGC, + MixedGC, + LastYoungGC, + InitialMarkGC, + Cleanup, + Remark + }; + + // Calculate PauseKind from internal state. + PauseKind young_gc_pause_kind() const; + // Record the given STW pause with the given start and end times (in s). + void record_pause(PauseKind kind, double start, double end); + // Indicate that we aborted marking before doing any mixed GCs. + void abort_time_to_mixed_tracking(); public: G1CollectorPolicy(); + virtual ~G1CollectorPolicy(); + virtual G1CollectorPolicy* as_g1_policy() { return this; } G1CollectorState* collector_state() const; --- old/src/share/vm/gc/g1/g1ErgoVerbose.cpp 2015-11-06 11:34:06.574339534 +0100 +++ new/src/share/vm/gc/g1/g1ErgoVerbose.cpp 2015-11-06 11:34:06.490337084 +0100 @@ -57,6 +57,7 @@ case ErgoConcCycles: return "Concurrent Cycles"; case ErgoMixedGCs: return "Mixed GCs"; case ErgoTiming: return "Timing"; + case ErgoIHOP: return "IHOP"; default: ShouldNotReachHere(); // Keep the Windows compiler happy --- old/src/share/vm/gc/g1/g1ErgoVerbose.hpp 2015-11-06 11:34:07.055353566 +0100 +++ new/src/share/vm/gc/g1/g1ErgoVerbose.hpp 2015-11-06 11:34:06.971351115 +0100 @@ -71,6 +71,7 @@ ErgoConcCycles, ErgoMixedGCs, ErgoTiming, + ErgoIHOP, ErgoHeuristicNum } ErgoHeuristic; --- old/src/share/vm/prims/jni.cpp 2015-11-06 11:34:07.534367539 +0100 +++ new/src/share/vm/prims/jni.cpp 2015-11-06 11:34:07.447365001 +0100 @@ -3876,6 +3876,7 @@ void TestBufferingOopClosure_test(); void TestCodeCacheRemSet_test(); void FreeRegionList_test(); +void IHOP_test(); void test_memset_with_concurrent_readers(); void TestPredictions_test(); void WorkerDataArray_test(); @@ -3922,6 +3923,7 @@ run_unit_test(TestCodeCacheRemSet_test()); if (UseG1GC) { run_unit_test(FreeRegionList_test()); + run_unit_test(IHOP_test()); } run_unit_test(test_memset_with_concurrent_readers()); run_unit_test(TestPredictions_test()); --- /dev/null 2015-11-03 09:29:33.822158357 +0100 +++ new/src/share/vm/gc/g1/g1IHOPControl.cpp 2015-11-06 11:34:07.973380345 +0100 @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc/g1/g1CollectedHeap.inline.hpp" +#include "gc/g1/g1ErgoVerbose.hpp" +#include "gc/g1/g1IHOPControl.hpp" +#include "gc/g1/g1Predictions.hpp" + +G1IHOPControl::G1IHOPControl(double initial_ihop_percent, size_t target_occupancy) : + _ihop_percent(initial_ihop_percent), + _target_occupancy(target_occupancy) { + assert(_ihop_percent >= 0.0 && _ihop_percent <= 100.0, "Initial IHOP value must be between 0 and 100 but is %.3f", initial_ihop_percent); +} + +G1StaticIHOPControl::G1StaticIHOPControl(double ihop_percent, size_t target_occupancy) : + G1IHOPControl(ihop_percent, target_occupancy), + _last_allocation_time_s(0.0), + _last_allocated_bytes(0), + _last_marking_length_s(0.0) { + assert(_target_occupancy > 0, "Target occupancy must be larger than zero."); +} + +void G1StaticIHOPControl::print() { + ergo_verbose6(ErgoIHOP, + "basic information", + ergo_format_reason("value update") + ergo_format_byte_perc("threshold") + ergo_format_byte("target occupancy") + ergo_format_byte("current occupancy") + ergo_format_double("recent old gen allocation rate") + ergo_format_ms("recent marking phase length"), + get_conc_mark_start_threshold(), + (double) get_conc_mark_start_threshold() / _target_occupancy * 100.0, + _target_occupancy, + G1CollectedHeap::heap()->used(), + _last_allocation_time_s > 0.0 ? _last_allocated_bytes / _last_allocation_time_s : 0.0, + _last_marking_length_s * 1000.0); +} + +#ifndef PRODUCT +static void test_update(G1IHOPControl* ctrl, double alloc_time, size_t alloc_amount, size_t young_size, double mark_time) { + for (int i = 0; i < 100; i++) { + ctrl->update_allocation_info(alloc_time, alloc_amount, young_size); + ctrl->update_time_to_mixed(mark_time); + } +} + +void G1StaticIHOPControl::test() { + size_t const initial_ihop = 45; + + G1StaticIHOPControl ctrl(initial_ihop, 100); + size_t threshold; + + threshold = ctrl.get_conc_mark_start_threshold(); + assert(threshold == initial_ihop, + "Expected IHOP threshold of " SIZE_FORMAT " but is " SIZE_FORMAT, initial_ihop, threshold); + + ctrl.update_allocation_info(100.0, 100, 100); + threshold = ctrl.get_conc_mark_start_threshold(); + assert(threshold == initial_ihop, + "Expected IHOP threshold of " SIZE_FORMAT " but is " SIZE_FORMAT, initial_ihop, threshold); + + ctrl.update_time_to_mixed(1000.0); + threshold = ctrl.get_conc_mark_start_threshold(); + assert(threshold == initial_ihop, + "Expected IHOP threshold of " SIZE_FORMAT " but is " SIZE_FORMAT, initial_ihop, threshold); + + // Whatever we pass, the IHOP value must stay the same. + test_update(&ctrl, 2, 10, 10, 3); + threshold = ctrl.get_conc_mark_start_threshold(); + assert(threshold == initial_ihop, + "Expected IHOP threshold of " SIZE_FORMAT " but is " SIZE_FORMAT, initial_ihop, threshold); + + test_update(&ctrl, 12, 10, 10, 3); + threshold = ctrl.get_conc_mark_start_threshold(); + assert(threshold == initial_ihop, + "Expected IHOP threshold of " SIZE_FORMAT " but is " SIZE_FORMAT, initial_ihop, threshold); +} +#endif + +#ifndef PRODUCT +void IHOP_test() { + G1StaticIHOPControl::test(); +} +#endif --- /dev/null 2015-11-03 09:29:33.822158357 +0100 +++ new/src/share/vm/gc/g1/g1IHOPControl.hpp 2015-11-06 11:34:08.420393384 +0100 @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1IHOPCONTROL_HPP
+#define SHARE_VM_GC_G1_G1IHOPCONTROL_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/numberSeq.hpp"
+
+class G1Predictions;
+
+// Manages the decision about the threshold when concurrent marking should start.
+class G1IHOPControl : public CHeapObj<mtGC> {
+ protected:
+  double _ihop_percent;
+  size_t _target_occupancy;
+
+  // Initialize an instance with the initial IHOP value in percent and the target
+  // occupancy. The target occupancy is the number of bytes when marking should
+  // be finished and reclaim started.
+  G1IHOPControl(double initial_ihop_percent, size_t target_occupancy);
+ public:
+  virtual ~G1IHOPControl() { }
+
+  // Get the current marking threshold in bytes.
+  virtual size_t get_conc_mark_start_threshold() = 0;
+
+  // Update information about recent time during which allocations happened,
+  // how many allocations happened and an additional safety buffer.
+  virtual void update_allocation_info(double allocation_time_s, size_t allocated_bytes, size_t additional_buffer_size) = 0;
+  // Update the time from the end of initial mark to the first mixed gc.
+  virtual void update_time_to_mixed(double marking_length_s) = 0;
+
+  virtual void print() = 0;
+};
+
+class G1StaticIHOPControl : public G1IHOPControl {
+  double _last_allocation_time_s;
+  size_t _last_allocated_bytes;
+  double _last_marking_length_s;
+ public:
+  G1StaticIHOPControl(double ihop_percent, size_t target_occupancy);
+
+  size_t get_conc_mark_start_threshold() { return (size_t) (_ihop_percent * _target_occupancy / 100.0); }
+
+  virtual void update_allocation_info(double allocation_time_s, size_t allocated_bytes, size_t additional_buffer_size) {
+    assert(allocation_time_s >= 0.0, "Allocation time must be positive but is %.3f", allocation_time_s);
+    _last_allocation_time_s = allocation_time_s;
+    _last_allocated_bytes = allocated_bytes;
+  }
+
+  virtual void update_time_to_mixed(double marking_length_s) {
+    assert(marking_length_s > 0.0, "Marking length must be larger than zero but is %.3f", marking_length_s);
+    _last_marking_length_s = marking_length_s;
+  }
+
+  virtual void print();
+#ifndef PRODUCT
+  static void test();
+#endif
+};
+
+#endif // SHARE_VM_GC_G1_G1IHOPCONTROL_HPP
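To make the new control concrete: a minimal standalone sketch of how the static IHOP implementation above derives its byte threshold from the configured percentage and how the policy's need_to_start_conc_mark() compares it against the current occupancy plus a pending allocation. The class name StaticIHOP and all numbers are made up for illustration; this is not the HotSpot code itself.

#include <cstdio>
#include <cstddef>

class StaticIHOP {
  double _ihop_percent;
  size_t _target_occupancy;   // in the patch this is the maximum heap capacity in bytes
 public:
  StaticIHOP(double percent, size_t target) : _ihop_percent(percent), _target_occupancy(target) {}
  // Mirrors get_conc_mark_start_threshold(): a fixed fraction of the target occupancy.
  size_t threshold() const { return (size_t)(_ihop_percent * _target_occupancy / 100.0); }
};

int main() {
  StaticIHOP ihop(45.0, 1024 * 1024 * 1024);  // 45% of an example 1 GB heap
  size_t used  = 400 * 1024 * 1024;           // example non-young occupancy
  size_t alloc = 64 * 1024 * 1024;            // example pending humongous allocation
  // Same shape as the check in need_to_start_conc_mark(): occupancy + request vs. threshold.
  bool start_marking = used + alloc > ihop.threshold();
  printf("threshold=%zu bytes, start marking: %s\n", ihop.threshold(), start_marking ? "yes" : "no");
  return 0;
}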