
src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp


        

*** 110,135 ****
    _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
    _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
  
    _pause_time_target_ms((double) MaxGCPauseMillis),
  
-   _gcs_are_young(true),
- 
-   _during_marking(false),
-   _in_marking_window(false),
-   _in_marking_window_im(false),
- 
    _recent_prev_end_times_for_all_gcs_sec(
                                  new TruncatedSeq(NumPrevPausesForHeuristics)),
  
    _recent_avg_pause_time_ratio(0.0),
  
-   _initiate_conc_mark_if_possible(false),
-   _during_initial_mark_pause(false),
-   _last_young_gc(false),
-   _last_gc_was_young(false),
- 
    _eden_used_bytes_before_gc(0),
    _survivor_used_bytes_before_gc(0),
    _heap_used_bytes_before_gc(0),
    _metaspace_used_bytes_before_gc(0),
    _eden_capacity_bytes_before_gc(0),
--- 110,124 ----
*** 565,575 ****
      desired_max_length = absolute_max_length;
    }
  
    uint young_list_target_length = 0;
    if (adaptive_young_list_length()) {
!     if (gcs_are_young()) {
        young_list_target_length =
                          calculate_young_list_target_length(rs_lengths,
                                                             base_min_length,
                                                             desired_min_length,
                                                             desired_max_length);
--- 554,564 ----
      desired_max_length = absolute_max_length;
    }
  
    uint young_list_target_length = 0;
    if (adaptive_young_list_length()) {
!     if (collector_state()->gcs_are_young()) {
        young_list_target_length =
                          calculate_young_list_target_length(rs_lengths,
                                                             base_min_length,
                                                             desired_min_length,
                                                             desired_max_length);
*** 607,617 ****
  G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
                                                         uint base_min_length,
                                                         uint desired_min_length,
                                                         uint desired_max_length) {
    assert(adaptive_young_list_length(), "pre-condition");
!   assert(gcs_are_young(), "only call this for young GCs");
  
    // In case some edge-condition makes the desired max length too small...
    if (desired_max_length <= desired_min_length) {
      return desired_min_length;
    }
--- 596,606 ----
  G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
                                                         uint base_min_length,
                                                         uint desired_min_length,
                                                         uint desired_max_length) {
    assert(adaptive_young_list_length(), "pre-condition");
!   assert(collector_state()->gcs_are_young(), "only call this for young GCs");
  
    // In case some edge-condition makes the desired max length too small...
    if (desired_max_length <= desired_min_length) {
      return desired_min_length;
    }
*** 710,720 ****
  double G1CollectorPolicy::predict_survivor_regions_evac_time() {
    double survivor_regions_evac_time = 0.0;
    for (HeapRegion * r = _recorded_survivor_head;
       r != NULL && r != _recorded_survivor_tail->get_next_young_region();
       r = r->get_next_young_region()) {
!     survivor_regions_evac_time += predict_region_elapsed_time_ms(r, gcs_are_young());
    }
    return survivor_regions_evac_time;
  }
  
  void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
--- 699,709 ----
  double G1CollectorPolicy::predict_survivor_regions_evac_time() {
    double survivor_regions_evac_time = 0.0;
    for (HeapRegion * r = _recorded_survivor_head;
       r != NULL && r != _recorded_survivor_tail->get_next_young_region();
       r = r->get_next_young_region()) {
!     survivor_regions_evac_time += predict_region_elapsed_time_ms(r, collector_state()->gcs_are_young());
    }
    return survivor_regions_evac_time;
  }
  
  void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
*** 813,828 ****
    _g1->clear_full_collection();
  
    // "Nuke" the heuristics that control the young/mixed GC
    // transitions and make sure we start with young GCs after the Full GC.
!   set_gcs_are_young(true);
!   _last_young_gc = false;
!   clear_initiate_conc_mark_if_possible();
!   clear_during_initial_mark_pause();
!   _in_marking_window = false;
!   _in_marking_window_im = false;
  
    _short_lived_surv_rate_group->start_adding_regions();
    // also call this on any additional surv rate groups
  
    record_survivor_regions(0, NULL, NULL);
--- 802,817 ----
    _g1->clear_full_collection();
  
    // "Nuke" the heuristics that control the young/mixed GC
    // transitions and make sure we start with young GCs after the Full GC.
!   collector_state()->set_gcs_are_young(true);
!   collector_state()->set_last_young_gc(false);
!   collector_state()->set_initiate_conc_mark_if_possible(false);
!   collector_state()->set_during_initial_mark_pause(false);
!   collector_state()->set_in_marking_window(false);
!   collector_state()->set_in_marking_window_im(false);
  
    _short_lived_surv_rate_group->start_adding_regions();
    // also call this on any additional surv rate groups
  
    record_survivor_regions(0, NULL, NULL);
*** 858,887 ****
    _pending_cards = _g1->pending_card_num();
    _collection_set_bytes_used_before = 0;
    _bytes_copied_during_gc = 0;
  
!   _last_gc_was_young = false;
  
    // do that for any other surv rate groups
    _short_lived_surv_rate_group->stop_adding_regions();
    _survivors_age_table.clear();
  
    assert( verify_young_ages(), "region age verification" );
  }
  
  void G1CollectorPolicy::record_concurrent_mark_init_end(double
                                                     mark_init_elapsed_time_ms) {
!   _during_marking = true;
!   assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
!   clear_during_initial_mark_pause();
    _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
  }
  
  void G1CollectorPolicy::record_concurrent_mark_remark_start() {
    _mark_remark_start_sec = os::elapsedTime();
!   _during_marking = false;
  }
  
  void G1CollectorPolicy::record_concurrent_mark_remark_end() {
    double end_time_sec = os::elapsedTime();
    double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
--- 847,876 ----
    _pending_cards = _g1->pending_card_num();
    _collection_set_bytes_used_before = 0;
    _bytes_copied_during_gc = 0;
  
!   collector_state()->set_last_gc_was_young(false);
  
    // do that for any other surv rate groups
    _short_lived_surv_rate_group->stop_adding_regions();
    _survivors_age_table.clear();
  
    assert( verify_young_ages(), "region age verification" );
  }
  
  void G1CollectorPolicy::record_concurrent_mark_init_end(double
                                                     mark_init_elapsed_time_ms) {
!   collector_state()->set_during_marking(true);
!   assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
!   collector_state()->set_during_initial_mark_pause(false);
    _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
  }
  
  void G1CollectorPolicy::record_concurrent_mark_remark_start() {
    _mark_remark_start_sec = os::elapsedTime();
!   collector_state()->set_during_marking(false);
  }
  
  void G1CollectorPolicy::record_concurrent_mark_remark_end() {
    double end_time_sec = os::elapsedTime();
    double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
*** 895,906 ****
  void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
    _mark_cleanup_start_sec = os::elapsedTime();
  }
  
  void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
!   _last_young_gc = true;
!   _in_marking_window = false;
  }
  
  void G1CollectorPolicy::record_concurrent_pause() {
    if (_stop_world_start > 0.0) {
      double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
--- 884,895 ----
  void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
    _mark_cleanup_start_sec = os::elapsedTime();
  }
  
  void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
!   collector_state()->set_last_young_gc(true);
!   collector_state()->set_in_marking_window(false);
  }
  
  void G1CollectorPolicy::record_concurrent_pause() {
    if (_stop_world_start > 0.0) {
      double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
*** 917,927 ****
                       (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
    size_t cur_used_bytes = _g1->non_young_capacity_bytes();
    size_t alloc_byte_size = alloc_word_size * HeapWordSize;
  
    if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
!     if (gcs_are_young() && !_last_young_gc) {
        ergo_verbose5(ErgoConcCycles,
          "request concurrent cycle initiation",
          ergo_format_reason("occupancy higher than threshold")
          ergo_format_byte("occupancy")
          ergo_format_byte("allocation request")
--- 906,916 ----
                       (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
    size_t cur_used_bytes = _g1->non_young_capacity_bytes();
    size_t alloc_byte_size = alloc_word_size * HeapWordSize;
  
    if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
!     if (collector_state()->gcs_are_young() && !collector_state()->last_young_gc()) {
        ergo_verbose5(ErgoConcCycles,
          "request concurrent cycle initiation",
          ergo_format_reason("occupancy higher than threshold")
          ergo_format_byte("occupancy")
          ergo_format_byte("allocation request")
*** 972,989 ****
      _short_lived_surv_rate_group->print();
      // do that for any other surv rate groups too
    }
  #endif // PRODUCT
  
!   last_pause_included_initial_mark = during_initial_mark_pause();
    if (last_pause_included_initial_mark) {
      record_concurrent_mark_init_end(0.0);
    } else if (need_to_start_conc_mark("end of GC")) {
      // Note: this might have already been set, if during the last
      // pause we decided to start a cycle but at the beginning of
      // this pause we decided to postpone it. That's OK.
!     set_initiate_conc_mark_if_possible();
    }
  
    _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
                            end_time_sec, false);
--- 961,978 ----
      _short_lived_surv_rate_group->print();
      // do that for any other surv rate groups too
    }
  #endif // PRODUCT
  
!   last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
    if (last_pause_included_initial_mark) {
      record_concurrent_mark_init_end(0.0);
    } else if (need_to_start_conc_mark("end of GC")) {
      // Note: this might have already been set, if during the last
      // pause we decided to start a cycle but at the beginning of
      // this pause we decided to postpone it. That's OK.
!     collector_state()->set_initiate_conc_mark_if_possible(true);
    }
  
    _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
                            end_time_sec, false);
*** 1041,1081 ****
          _recent_avg_pause_time_ratio = 1.0;
        }
      }
    }
  
!   bool new_in_marking_window = _in_marking_window;
    bool new_in_marking_window_im = false;
    if (last_pause_included_initial_mark) {
      new_in_marking_window = true;
      new_in_marking_window_im = true;
    }
  
!   if (_last_young_gc) {
      // This is supposed to to be the "last young GC" before we start
      // doing mixed GCs. Here we decide whether to start mixed GCs or not.
      if (!last_pause_included_initial_mark) {
        if (next_gc_should_be_mixed("start mixed GCs",
                                    "do not start mixed GCs")) {
!         set_gcs_are_young(false);
        }
      } else {
        ergo_verbose0(ErgoMixedGCs,
                      "do not start mixed GCs",
                      ergo_format_reason("concurrent cycle is about to start"));
      }
!     _last_young_gc = false;
    }
  
!   if (!_last_gc_was_young) {
      // This is a mixed GC. Here we decide whether to continue doing
      // mixed GCs or not.
      if (!next_gc_should_be_mixed("continue mixed GCs",
                                   "do not continue mixed GCs")) {
!       set_gcs_are_young(true);
      }
    }
  
    _short_lived_surv_rate_group->start_adding_regions();
    // Do that for any other surv rate groups
--- 1030,1070 ----
          _recent_avg_pause_time_ratio = 1.0;
        }
      }
    }
  
!   bool new_in_marking_window = collector_state()->in_marking_window();
    bool new_in_marking_window_im = false;
    if (last_pause_included_initial_mark) {
      new_in_marking_window = true;
      new_in_marking_window_im = true;
    }
  
!   if (collector_state()->last_young_gc()) {
      // This is supposed to to be the "last young GC" before we start
      // doing mixed GCs. Here we decide whether to start mixed GCs or not.
      if (!last_pause_included_initial_mark) {
        if (next_gc_should_be_mixed("start mixed GCs",
                                    "do not start mixed GCs")) {
!         collector_state()->set_gcs_are_young(false);
        }
      } else {
        ergo_verbose0(ErgoMixedGCs,
                      "do not start mixed GCs",
                      ergo_format_reason("concurrent cycle is about to start"));
      }
!     collector_state()->set_last_young_gc(false);
    }
  
!   if (!collector_state()->last_gc_was_young()) {
      // This is a mixed GC. Here we decide whether to continue doing
      // mixed GCs or not.
      if (!next_gc_should_be_mixed("continue mixed GCs",
                                   "do not continue mixed GCs")) {
!       collector_state()->set_gcs_are_young(true);
      }
    }
  
    _short_lived_surv_rate_group->start_adding_regions();
    // Do that for any other surv rate groups
*** 1090,1110 ****
    size_t cards_scanned = _g1->cards_scanned();
  
    double cost_per_entry_ms = 0.0;
    if (cards_scanned > 10) {
      cost_per_entry_ms = phase_times()->average_last_scan_rs_time() / (double) cards_scanned;
!     if (_last_gc_was_young) {
        _cost_per_entry_ms_seq->add(cost_per_entry_ms);
      } else {
        _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
      }
    }
  
    if (_max_rs_lengths > 0) {
      double cards_per_entry_ratio =
        (double) cards_scanned / (double) _max_rs_lengths;
!     if (_last_gc_was_young) {
        _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
      } else {
        _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
      }
    }
--- 1079,1099 ----
    size_t cards_scanned = _g1->cards_scanned();
  
    double cost_per_entry_ms = 0.0;
    if (cards_scanned > 10) {
      cost_per_entry_ms = phase_times()->average_last_scan_rs_time() / (double) cards_scanned;
!     if (collector_state()->last_gc_was_young()) {
        _cost_per_entry_ms_seq->add(cost_per_entry_ms);
      } else {
        _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
      }
    }
  
    if (_max_rs_lengths > 0) {
      double cards_per_entry_ratio =
        (double) cards_scanned / (double) _max_rs_lengths;
!     if (collector_state()->last_gc_was_young()) {
        _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
      } else {
        _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
      }
    }
*** 1132,1142 ****
    size_t copied_bytes = _collection_set_bytes_used_before - freed_bytes;
    double cost_per_byte_ms = 0.0;
  
    if (copied_bytes > 0) {
      cost_per_byte_ms = phase_times()->average_last_obj_copy_time() / (double) copied_bytes;
!     if (_in_marking_window) {
        _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
      } else {
        _cost_per_byte_ms_seq->add(cost_per_byte_ms);
      }
    }
--- 1121,1131 ----
    size_t copied_bytes = _collection_set_bytes_used_before - freed_bytes;
    double cost_per_byte_ms = 0.0;
  
    if (copied_bytes > 0) {
      cost_per_byte_ms = phase_times()->average_last_obj_copy_time() / (double) copied_bytes;
!     if (collector_state()->in_marking_window()) {
        _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
      } else {
        _cost_per_byte_ms_seq->add(cost_per_byte_ms);
      }
    }
*** 1175,1186 ****
      _pending_cards_seq->add((double) _pending_cards);
      _rs_lengths_seq->add((double) _max_rs_lengths);
    }
  
!   _in_marking_window = new_in_marking_window;
!   _in_marking_window_im = new_in_marking_window_im;
    _free_regions_at_end_of_collection = _g1->num_free_regions();
    update_young_list_target_length();
  
    // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
    double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS *
                                    G1RSetUpdatingPauseTimePercent / 100.0;
--- 1164,1175 ----
      _pending_cards_seq->add((double) _pending_cards);
      _rs_lengths_seq->add((double) _max_rs_lengths);
    }
  
!   collector_state()->set_in_marking_window(new_in_marking_window);
!   collector_state()->set_in_marking_window_im(new_in_marking_window_im);
    _free_regions_at_end_of_collection = _g1->num_free_regions();
    update_young_list_target_length();
  
    // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
    double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS *
                                    G1RSetUpdatingPauseTimePercent / 100.0;
*** 1304,1314 ****
  double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
    size_t rs_length = predict_rs_length_diff();
    size_t card_num;
!   if (gcs_are_young()) {
      card_num = predict_young_card_num(rs_length);
    } else {
      card_num = predict_non_young_card_num(rs_length);
    }
    return predict_base_elapsed_time_ms(pending_cards, card_num);
--- 1293,1303 ----
  double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
    size_t rs_length = predict_rs_length_diff();
    size_t card_num;
!   if (collector_state()->gcs_are_young()) {
      card_num = predict_young_card_num(rs_length);
    } else {
      card_num = predict_non_young_card_num(rs_length);
    }
    return predict_base_elapsed_time_ms(pending_cards, card_num);
*** 1470,1480 ****
      ergo_verbose1(ErgoConcCycles,
                    "request concurrent cycle initiation",
                    ergo_format_reason("requested by GC cause")
                    ergo_format_str("GC cause"),
                    GCCause::to_string(gc_cause));
!     set_initiate_conc_mark_if_possible();
      return true;
    } else {
      ergo_verbose1(ErgoConcCycles,
                    "do not request concurrent cycle initiation",
                    ergo_format_reason("concurrent cycle already in progress")
--- 1459,1469 ----
      ergo_verbose1(ErgoConcCycles,
                    "request concurrent cycle initiation",
                    ergo_format_reason("requested by GC cause")
                    ergo_format_str("GC cause"),
                    GCCause::to_string(gc_cause));
!     collector_state()->set_initiate_conc_mark_if_possible(true);
      return true;
    } else {
      ergo_verbose1(ErgoConcCycles,
                    "do not request concurrent cycle initiation",
                    ergo_format_reason("concurrent cycle already in progress")
*** 1487,1525 ****
  void G1CollectorPolicy::decide_on_conc_mark_initiation() {
    // We are about to decide on whether this pause will be an
    // initial-mark pause.
!   // First, during_initial_mark_pause() should not be already set. We
    // will set it here if we have to. However, it should be cleared by
    // the end of the pause (it's only set for the duration of an
    // initial-mark pause).
!   assert(!during_initial_mark_pause(), "pre-condition");
  
!   if (initiate_conc_mark_if_possible()) {
      // We had noticed on a previous pause that the heap occupancy has
      // gone over the initiating threshold and we should start a
      // concurrent marking cycle. So we might initiate one.
  
      bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
      if (!during_cycle) {
        // The concurrent marking thread is not "during a cycle", i.e.,
        // it has completed the last one. So we can go ahead and
        // initiate a new cycle.
  
!       set_during_initial_mark_pause();
        // We do not allow mixed GCs during marking.
!       if (!gcs_are_young()) {
!         set_gcs_are_young(true);
          ergo_verbose0(ErgoMixedGCs,
                        "end mixed GCs",
                        ergo_format_reason("concurrent cycle is about to start"));
        }
  
        // And we can now clear initiate_conc_mark_if_possible() as
        // we've already acted on it.
!       clear_initiate_conc_mark_if_possible();
  
        ergo_verbose0(ErgoConcCycles,
                      "initiate concurrent cycle",
                      ergo_format_reason("concurrent cycle initiation requested"));
      } else {
--- 1476,1514 ----
  void G1CollectorPolicy::decide_on_conc_mark_initiation() {
    // We are about to decide on whether this pause will be an
    // initial-mark pause.
!   // First, collector_state()->during_initial_mark_pause() should not be already set. We
    // will set it here if we have to. However, it should be cleared by
    // the end of the pause (it's only set for the duration of an
    // initial-mark pause).
!   assert(!collector_state()->during_initial_mark_pause(), "pre-condition");
  
!   if (collector_state()->initiate_conc_mark_if_possible()) {
      // We had noticed on a previous pause that the heap occupancy has
      // gone over the initiating threshold and we should start a
      // concurrent marking cycle. So we might initiate one.
  
      bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
      if (!during_cycle) {
        // The concurrent marking thread is not "during a cycle", i.e.,
        // it has completed the last one. So we can go ahead and
        // initiate a new cycle.
  
!       collector_state()->set_during_initial_mark_pause(true);
        // We do not allow mixed GCs during marking.
!       if (!collector_state()->gcs_are_young()) {
!         collector_state()->set_gcs_are_young(true);
          ergo_verbose0(ErgoMixedGCs,
                        "end mixed GCs",
                        ergo_format_reason("concurrent cycle is about to start"));
        }
  
        // And we can now clear initiate_conc_mark_if_possible() as
        // we've already acted on it.
!       collector_state()->set_initiate_conc_mark_if_possible(false);
  
        ergo_verbose0(ErgoConcCycles,
                      "initiate concurrent cycle",
                      ergo_format_reason("concurrent cycle initiation requested"));
      } else {
*** 1687,1697 ****
    // Therefore this routine may be called at a safepoint by the
    // VM thread, or in-between safepoints by mutator threads (when
    // retiring the current allocation region) or a concurrent
    // refine thread (RSet sampling).
  
!   double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
    size_t used_bytes = hr->used();
    _inc_cset_recorded_rs_lengths += rs_length;
    _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
    _inc_cset_bytes_used_before += used_bytes;
--- 1676,1686 ----
    // Therefore this routine may be called at a safepoint by the
    // VM thread, or in-between safepoints by mutator threads (when
    // retiring the current allocation region) or a concurrent
    // refine thread (RSet sampling).
  
!   double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
    size_t used_bytes = hr->used();
    _inc_cset_recorded_rs_lengths += rs_length;
    _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
    _inc_cset_bytes_used_before += used_bytes;
*** 1722,1732 ****
    ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
    ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
    _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;
  
    double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
!   double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
    double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
    _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
  
    hr->set_recorded_rs_length(new_rs_length);
    hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
--- 1711,1721 ----
    ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
    ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
    _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;
  
    double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
!   double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
    double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
    _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
  
    hr->set_recorded_rs_length(new_rs_length);
    hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
*** 1917,1929 ****
                  ergo_format_ms("predicted base time")
                  ergo_format_ms("remaining time")
                  ergo_format_ms("target pause time"),
                  _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
  
!   _last_gc_was_young = gcs_are_young() ? true : false;
  
!   if (_last_gc_was_young) {
      _trace_young_gen_time_data.increment_young_collection_count();
    } else {
      _trace_young_gen_time_data.increment_mixed_collection_count();
    }
--- 1906,1918 ----
                  ergo_format_ms("predicted base time")
                  ergo_format_ms("remaining time")
                  ergo_format_ms("target pause time"),
                  _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
  
!   collector_state()->set_last_gc_was_young(collector_state()->gcs_are_young() ? true : false);
  
!   if (collector_state()->last_gc_was_young()) {
      _trace_young_gen_time_data.increment_young_collection_count();
    } else {
      _trace_young_gen_time_data.increment_mixed_collection_count();
    }
*** 1970,1980 ****
    phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);
  
    // Set the start of the non-young choice time.
    double non_young_start_time_sec = young_end_time_sec;
  
!   if (!gcs_are_young()) {
      CollectionSetChooser* cset_chooser = _collectionSetChooser;
      cset_chooser->verify();
      const uint min_old_cset_length = calc_min_old_cset_length();
      const uint max_old_cset_length = calc_max_old_cset_length();
--- 1959,1969 ----
    phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);
  
    // Set the start of the non-young choice time.
    double non_young_start_time_sec = young_end_time_sec;
  
!   if (!collector_state()->gcs_are_young()) {
      CollectionSetChooser* cset_chooser = _collectionSetChooser;
      cset_chooser->verify();
      const uint min_old_cset_length = calc_min_old_cset_length();
      const uint max_old_cset_length = calc_max_old_cset_length();
*** 2016,2026 ****
                        reclaimable_bytes,
                        reclaimable_perc, threshold);
          break;
        }
  
!       double predicted_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
        if (check_time_remaining) {
          if (predicted_time_ms > time_remaining_ms) {
            // Too expensive for the current CSet.
            if (old_cset_region_length() >= min_old_cset_length) {
--- 2005,2015 ----
                        reclaimable_bytes,
                        reclaimable_perc, threshold);
          break;
        }
  
!       double predicted_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
        if (check_time_remaining) {
          if (predicted_time_ms > time_remaining_ms) {
            // Too expensive for the current CSet.
            if (old_cset_region_length() >= min_old_cset_length) {
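The pattern of this change is mechanical: each boolean flag that G1CollectorPolicy previously held directly (together with its ad-hoc set_*/clear_* helpers) is now read and written through the object returned by collector_state(). As a reading aid only, below is a minimal sketch of the kind of state holder these calls presuppose, using just the accessor names that appear in the hunks above; the actual G1CollectorState class is introduced in a separate file of this change and may differ in detail (for example, it may carry additional flags not used in this file).

// Hypothetical sketch, not part of this webrev.
class G1CollectorState {
  // Young/mixed GC selection.
  bool _gcs_are_young;        // current pauses collect only young regions
  bool _last_young_gc;        // the upcoming young GC is the last before mixed GCs
  bool _last_gc_was_young;    // whether the most recent pause was young-only

  // Concurrent marking cycle state.
  bool _during_marking;
  bool _in_marking_window;
  bool _in_marking_window_im;
  bool _initiate_conc_mark_if_possible;
  bool _during_initial_mark_pause;

 public:
  // Defaults match the initializers removed from the policy constructor above.
  G1CollectorState() :
    _gcs_are_young(true),
    _last_young_gc(false),
    _last_gc_was_young(false),
    _during_marking(false),
    _in_marking_window(false),
    _in_marking_window_im(false),
    _initiate_conc_mark_if_possible(false),
    _during_initial_mark_pause(false) { }

  // Setters replace the former direct assignments and set_*/clear_* helpers.
  void set_gcs_are_young(bool v)                  { _gcs_are_young = v; }
  void set_last_young_gc(bool v)                  { _last_young_gc = v; }
  void set_last_gc_was_young(bool v)              { _last_gc_was_young = v; }
  void set_during_marking(bool v)                 { _during_marking = v; }
  void set_in_marking_window(bool v)              { _in_marking_window = v; }
  void set_in_marking_window_im(bool v)           { _in_marking_window_im = v; }
  void set_initiate_conc_mark_if_possible(bool v) { _initiate_conc_mark_if_possible = v; }
  void set_during_initial_mark_pause(bool v)      { _during_initial_mark_pause = v; }

  // Getters replace the former field reads.
  bool gcs_are_young() const                  { return _gcs_are_young; }
  bool last_young_gc() const                  { return _last_young_gc; }
  bool last_gc_was_young() const              { return _last_gc_was_young; }
  bool during_marking() const                 { return _during_marking; }
  bool in_marking_window() const              { return _in_marking_window; }
  bool in_marking_window_im() const           { return _in_marking_window_im; }
  bool initiate_conc_mark_if_possible() const { return _initiate_conc_mark_if_possible; }
  bool during_initial_mark_pause() const      { return _during_initial_mark_pause; }
};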