src/hotspot/share/gc/g1/g1Policy.cpp

rev 60060 : [mq]: 8210462-lkorinth-review

*** 72,82 ****
    _free_regions_at_end_of_collection(0),
    _rs_length(0),
    _rs_length_prediction(0),
    _pending_cards_at_gc_start(0),
    _old_gen_alloc_tracker(),
!   _initial_mark_to_mixed(),
    _collection_set(NULL),
    _g1h(NULL),
    _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
    _mark_remark_start_sec(0),
    _mark_cleanup_start_sec(0),
--- 72,82 ----
    _free_regions_at_end_of_collection(0),
    _rs_length(0),
    _rs_length_prediction(0),
    _pending_cards_at_gc_start(0),
    _old_gen_alloc_tracker(),
!   _concurrent_start_to_mixed(),
    _collection_set(NULL),
    _g1h(NULL),
    _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
    _mark_remark_start_sec(0),
    _mark_cleanup_start_sec(0),
*** 446,456 ****
    // "Nuke" the heuristics that control the young/mixed GC
    // transitions and make sure we start with young GCs after the Full GC.
    collector_state()->set_in_young_only_phase(true);
    collector_state()->set_in_young_gc_before_mixed(false);
    collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
!   collector_state()->set_in_initial_mark_gc(false);
    collector_state()->set_mark_or_rebuild_in_progress(false);
    collector_state()->set_clearing_next_bitmap(false);
  
    _eden_surv_rate_group->start_adding_regions();
    // also call this on any additional surv rate groups
--- 446,456 ----
    // "Nuke" the heuristics that control the young/mixed GC
    // transitions and make sure we start with young GCs after the Full GC.
    collector_state()->set_in_young_only_phase(true);
    collector_state()->set_in_young_gc_before_mixed(false);
    collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
!   collector_state()->set_in_concurrent_start_gc(false);
    collector_state()->set_mark_or_rebuild_in_progress(false);
    collector_state()->set_clearing_next_bitmap(false);
  
    _eden_surv_rate_group->start_adding_regions();
    // also call this on any additional surv rate groups
*** 542,552 ****
    assert(_g1h->collection_set()->verify_young_ages(), "region age verification failed");
  }
  
  void G1Policy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {
    assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
!   collector_state()->set_in_initial_mark_gc(false);
  }
  
  void G1Policy::record_concurrent_mark_remark_start() {
    _mark_remark_start_sec = os::elapsedTime();
  }
--- 542,552 ----
    assert(_g1h->collection_set()->verify_young_ages(), "region age verification failed");
  }
  
  void G1Policy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {
    assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
!   collector_state()->set_in_concurrent_start_gc(false);
  }
  
  void G1Policy::record_concurrent_mark_remark_start() {
    _mark_remark_start_sec = os::elapsedTime();
  }
*** 630,650 ****
  void G1Policy::record_collection_pause_end(double pause_time_ms) {
    G1GCPhaseTimes* p = phase_times();
  
    double end_time_sec = os::elapsedTime();
  
!   bool this_pause_included_initial_mark = false;
!   bool this_pause_was_young_only = collector_state()->in_young_only_phase();
  
    bool update_stats = !_g1h->evacuation_failed();
  
!   record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);
  
    _collection_pause_end_millis = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  
!   this_pause_included_initial_mark = collector_state()->in_initial_mark_gc();
!   if (this_pause_included_initial_mark) {
      record_concurrent_mark_init_end(0.0);
    } else {
      maybe_start_marking();
    }
--- 630,648 ----
  void G1Policy::record_collection_pause_end(double pause_time_ms) {
    G1GCPhaseTimes* p = phase_times();
  
    double end_time_sec = os::elapsedTime();
  
!   PauseKind this_pause = young_gc_pause_kind();
  
    bool update_stats = !_g1h->evacuation_failed();
  
!   record_pause(this_pause, end_time_sec - pause_time_ms / 1000.0, end_time_sec);
  
    _collection_pause_end_millis = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  
!   if (is_concurrent_start_pause(this_pause)) {
      record_concurrent_mark_init_end(0.0);
    } else {
      maybe_start_marking();
    }
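In the hunks below, the two locals this_pause_included_initial_mark and this_pause_was_young_only are folded into the single PauseKind snapshot (this_pause) taken above before record_pause(), and every later test is rewritten against the is_young_only_pause(), is_last_young_pause() and is_concurrent_start_pause() predicates introduced near the end of this page.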
*** 671,688 ****
      _analytics->compute_pause_time_ratios(end_time_sec, pause_time_ms);
      _analytics->update_recent_gc_times(end_time_sec, pause_time_ms);
    }
  
!   if (collector_state()->in_young_gc_before_mixed()) {
!     assert(!this_pause_included_initial_mark, "The young GC before mixed is not allowed to be an initial mark GC");
      // This has been the young GC before we start doing mixed GCs. We already
      // decided to start mixed GCs much earlier, so there is nothing to do except
      // advancing the state.
      collector_state()->set_in_young_only_phase(false);
      collector_state()->set_in_young_gc_before_mixed(false);
!   } else if (!this_pause_was_young_only) {
      // This is a mixed GC. Here we decide whether to continue doing more
      // mixed GCs or not.
      if (!next_gc_should_be_mixed("continue mixed GCs",
                                   "do not continue mixed GCs")) {
        collector_state()->set_in_young_only_phase(true);
--- 669,687 ----
      _analytics->compute_pause_time_ratios(end_time_sec, pause_time_ms);
      _analytics->update_recent_gc_times(end_time_sec, pause_time_ms);
    }
  
!   if (is_last_young_pause(this_pause)) {
!     assert(!is_concurrent_start_pause(this_pause),
!            "The young GC before mixed is not allowed to be concurrent start GC");
      // This has been the young GC before we start doing mixed GCs. We already
      // decided to start mixed GCs much earlier, so there is nothing to do except
      // advancing the state.
      collector_state()->set_in_young_only_phase(false);
      collector_state()->set_in_young_gc_before_mixed(false);
!   } else if (!is_young_only_pause(this_pause)) {
      // This is a mixed GC. Here we decide whether to continue doing more
      // mixed GCs or not.
      if (!next_gc_should_be_mixed("continue mixed GCs",
                                   "do not continue mixed GCs")) {
        collector_state()->set_in_young_only_phase(true);
*** 711,732 ****
      double avg_time_merge_cards = average_time_ms(G1GCPhaseTimes::MergeER) +
                                    average_time_ms(G1GCPhaseTimes::MergeRS) +
                                    average_time_ms(G1GCPhaseTimes::MergeHCC) +
                                    average_time_ms(G1GCPhaseTimes::MergeLB) +
                                    average_time_ms(G1GCPhaseTimes::OptMergeRS);
!     _analytics->report_cost_per_card_merge_ms(avg_time_merge_cards / total_cards_merged, this_pause_was_young_only);
    }
  
    // Update prediction for card scan
    size_t const total_cards_scanned = p->sum_thread_work_items(G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::ScanHRScannedCards) +
                                       p->sum_thread_work_items(G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::ScanHRScannedCards);
  
    if (total_cards_scanned > CardsNumSamplingThreshold) {
      double avg_time_dirty_card_scan = average_time_ms(G1GCPhaseTimes::ScanHR) +
                                        average_time_ms(G1GCPhaseTimes::OptScanHR);
!     _analytics->report_cost_per_card_scan_ms(avg_time_dirty_card_scan / total_cards_scanned, this_pause_was_young_only);
    }
  
    // Update prediction for the ratio between cards from the remembered
    // sets and actually scanned cards from the remembered sets.
    // Cards from the remembered sets are all cards not duplicated by cards from
--- 710,733 ----
      double avg_time_merge_cards = average_time_ms(G1GCPhaseTimes::MergeER) +
                                    average_time_ms(G1GCPhaseTimes::MergeRS) +
                                    average_time_ms(G1GCPhaseTimes::MergeHCC) +
                                    average_time_ms(G1GCPhaseTimes::MergeLB) +
                                    average_time_ms(G1GCPhaseTimes::OptMergeRS);
!     _analytics->report_cost_per_card_merge_ms(avg_time_merge_cards / total_cards_merged,
!                                               is_young_only_pause(this_pause));
    }
  
    // Update prediction for card scan
    size_t const total_cards_scanned = p->sum_thread_work_items(G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::ScanHRScannedCards) +
                                       p->sum_thread_work_items(G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::ScanHRScannedCards);
  
    if (total_cards_scanned > CardsNumSamplingThreshold) {
      double avg_time_dirty_card_scan = average_time_ms(G1GCPhaseTimes::ScanHR) +
                                        average_time_ms(G1GCPhaseTimes::OptScanHR);
!     _analytics->report_cost_per_card_scan_ms(avg_time_dirty_card_scan / total_cards_scanned,
!                                              is_young_only_pause(this_pause));
    }
  
    // Update prediction for the ratio between cards from the remembered
    // sets and actually scanned cards from the remembered sets.
    // Cards from the remembered sets are all cards not duplicated by cards from
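As a worked example of the cost sampling in this hunk (illustrative numbers, not measured data): if the merge phases sum to an average of 5 ms across threads and 50,000 cards were merged, the reported cost is 5 / 50000 = 0.0001 ms per card. The CardsNumSamplingThreshold guard skips the update when too few cards were processed for the average to be meaningful; only the young-only flag passed to the analytics changes in this patch, the arithmetic itself is untouched.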
*** 736,746 ****
    const size_t from_rs_length_cards = (total_cards_scanned > total_log_buffer_cards) ?
                                        total_cards_scanned - total_log_buffer_cards : 0;
    double merge_to_scan_ratio = 0.0;
    if (total_cards_scanned > 0) {
      merge_to_scan_ratio = (double) from_rs_length_cards / total_cards_scanned;
    }
!   _analytics->report_card_merge_to_scan_ratio(merge_to_scan_ratio, this_pause_was_young_only);
  
    const size_t recorded_rs_length = _collection_set->recorded_rs_length();
    const size_t rs_length_diff = _rs_length > recorded_rs_length ? _rs_length - recorded_rs_length : 0;
    _analytics->report_rs_length_diff(rs_length_diff);
--- 737,748 ----
    const size_t from_rs_length_cards = (total_cards_scanned > total_log_buffer_cards) ?
                                        total_cards_scanned - total_log_buffer_cards : 0;
    double merge_to_scan_ratio = 0.0;
    if (total_cards_scanned > 0) {
      merge_to_scan_ratio = (double) from_rs_length_cards / total_cards_scanned;
    }
!   _analytics->report_card_merge_to_scan_ratio(merge_to_scan_ratio,
!                                               is_young_only_pause(this_pause));
  
    const size_t recorded_rs_length = _collection_set->recorded_rs_length();
    const size_t rs_length_diff = _rs_length > recorded_rs_length ? _rs_length - recorded_rs_length : 0;
    _analytics->report_rs_length_diff(rs_length_diff);
*** 766,784 ****
      // Do not update RS lengths and the number of pending cards with information from mixed gc:
      // these are is wildly different to during young only gc and mess up young gen sizing right
      // after the mixed gc phase.
      // During mixed gc we do not use them for young gen sizing.
!     if (this_pause_was_young_only) {
        _analytics->report_pending_cards((double) _pending_cards_at_gc_start);
        _analytics->report_rs_length((double) _rs_length);
      }
    }
  
!   assert(!(this_pause_included_initial_mark && collector_state()->mark_or_rebuild_in_progress()),
!          "If the last pause has been an initial mark, we should not have been in the marking window");
!   if (this_pause_included_initial_mark) {
      collector_state()->set_mark_or_rebuild_in_progress(true);
    }
  
    _free_regions_at_end_of_collection = _g1h->num_free_regions();
--- 768,786 ----
      // Do not update RS lengths and the number of pending cards with information from mixed gc:
      // these are is wildly different to during young only gc and mess up young gen sizing right
      // after the mixed gc phase.
      // During mixed gc we do not use them for young gen sizing.
!     if (is_young_only_pause(this_pause)) {
        _analytics->report_pending_cards((double) _pending_cards_at_gc_start);
        _analytics->report_rs_length((double) _rs_length);
      }
    }
  
!   assert(!(is_concurrent_start_pause(this_pause) && collector_state()->mark_or_rebuild_in_progress()),
!          "If the last pause has been concurrent start, we should not have been in the marking window");
!   if (is_concurrent_start_pause(this_pause)) {
      collector_state()->set_mark_or_rebuild_in_progress(true);
    }
  
    _free_regions_at_end_of_collection = _g1h->num_free_regions();
*** 795,815 ****
      _old_gen_alloc_tracker.reset_after_young_gc(app_time_ms / 1000.0);
      update_ihop_prediction(_old_gen_alloc_tracker.last_cycle_duration(),
                             _old_gen_alloc_tracker.last_cycle_old_bytes(),
                             last_unrestrained_young_length * HeapRegion::GrainBytes,
!                            this_pause_was_young_only);
      _ihop_control->send_trace_event(_g1h->gc_tracer_stw());
    } else {
      // Any garbage collection triggered as periodic collection resets the time-to-mixed
      // measurement. Periodic collection typically means that the application is "inactive", i.e.
      // the marking threads may have received an uncharacterisic amount of cpu time
      // for completing the marking, i.e. are faster than expected.
      // This skews the predicted marking length towards smaller values which might cause
      // the mark start being too late.
!     _initial_mark_to_mixed.reset();
    }
  
    // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
    double scan_logged_cards_time_goal_ms =
      _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
--- 797,817 ----
      _old_gen_alloc_tracker.reset_after_young_gc(app_time_ms / 1000.0);
      update_ihop_prediction(_old_gen_alloc_tracker.last_cycle_duration(),
                             _old_gen_alloc_tracker.last_cycle_old_bytes(),
                             last_unrestrained_young_length * HeapRegion::GrainBytes,
!                            is_young_only_pause(this_pause));
      _ihop_control->send_trace_event(_g1h->gc_tracer_stw());
    } else {
      // Any garbage collection triggered as periodic collection resets the time-to-mixed
      // measurement. Periodic collection typically means that the application is "inactive", i.e.
      // the marking threads may have received an uncharacterisic amount of cpu time
      // for completing the marking, i.e. are faster than expected.
      // This skews the predicted marking length towards smaller values which might cause
      // the mark start being too late.
!     _concurrent_start_to_mixed.reset();
    }
  
    // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
    double scan_logged_cards_time_goal_ms =
      _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
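A worked example for the unchanged goal computation at the end of this hunk: _mmu_tracker->max_gc_time() is in seconds, so with, say, a 200 ms pause-time goal (0.2 s, the MaxGCPauseMillis default) and G1RSetUpdatingPauseTimePercent at its default of 10, the budget for scanning logged cards comes out to 0.2 * 1000 * 10 / 100 = 20 ms.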
*** 856,869 ****
    double const min_valid_time = 1e-6;
  
    bool report = false;
  
    double marking_to_mixed_time = -1.0;
!   if (!this_gc_was_young_only && _initial_mark_to_mixed.has_result()) {
!     marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time();
      assert(marking_to_mixed_time > 0.0,
!            "Initial mark to mixed time must be larger than zero but is %.3f",
             marking_to_mixed_time);
      if (marking_to_mixed_time > min_valid_time) {
        _ihop_control->update_marking_length(marking_to_mixed_time);
        report = true;
      }
--- 858,871 ----
    double const min_valid_time = 1e-6;
  
    bool report = false;
  
    double marking_to_mixed_time = -1.0;
!   if (!this_gc_was_young_only && _concurrent_start_to_mixed.has_result()) {
!     marking_to_mixed_time = _concurrent_start_to_mixed.last_marking_time();
      assert(marking_to_mixed_time > 0.0,
!            "Concurrent start to mixed time must be larger than zero but is %.3f",
             marking_to_mixed_time);
      if (marking_to_mixed_time > min_valid_time) {
        _ihop_control->update_marking_length(marking_to_mixed_time);
        report = true;
      }
*** 1015,1053 ****
    // be allocated into.
    _max_survivor_regions = MIN2(desired_max_survivor_regions,
                                 _g1h->num_free_or_available_regions());
  }
  
! bool G1Policy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) {
    // We actually check whether we are marking here and not if we are in a
    // reclamation phase. This means that we will schedule a concurrent mark
    // even while we are still in the process of reclaiming memory.
    bool during_cycle = _g1h->concurrent_mark()->cm_thread()->during_cycle();
    if (!during_cycle) {
!     log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause));
      collector_state()->set_initiate_conc_mark_if_possible(true);
      return true;
    } else {
!     log_debug(gc, ergo)("Do not request concurrent cycle initiation (concurrent cycle already in progress). GC cause: %s", GCCause::to_string(gc_cause));
      return false;
    }
  }
  
  void G1Policy::initiate_conc_mark() {
!   collector_state()->set_in_initial_mark_gc(true);
    collector_state()->set_initiate_conc_mark_if_possible(false);
  }
  
  void G1Policy::decide_on_conc_mark_initiation() {
!   // We are about to decide on whether this pause will be an
!   // initial-mark pause.
!   // First, collector_state()->in_initial_mark_gc() should not be already set. We
    // will set it here if we have to. However, it should be cleared by
!   // the end of the pause (it's only set for the duration of an
!   // initial-mark pause).
!   assert(!collector_state()->in_initial_mark_gc(), "pre-condition");
  
    if (collector_state()->initiate_conc_mark_if_possible()) {
      // We had noticed on a previous pause that the heap occupancy has
      // gone over the initiating threshold and we should start a
      // concurrent marking cycle. Or we've been explicitly requested
--- 1017,1059 ----
    // be allocated into.
    _max_survivor_regions = MIN2(desired_max_survivor_regions,
                                 _g1h->num_free_or_available_regions());
  }
  
! bool G1Policy::force_concurrent_start_if_outside_cycle(GCCause::Cause gc_cause) {
    // We actually check whether we are marking here and not if we are in a
    // reclamation phase. This means that we will schedule a concurrent mark
    // even while we are still in the process of reclaiming memory.
    bool during_cycle = _g1h->concurrent_mark()->cm_thread()->during_cycle();
    if (!during_cycle) {
!     log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). "
!                         "GC cause: %s",
!                         GCCause::to_string(gc_cause));
      collector_state()->set_initiate_conc_mark_if_possible(true);
      return true;
    } else {
!     log_debug(gc, ergo)("Do not request concurrent cycle initiation "
!                         "(concurrent cycle already in progress). GC cause: %s",
!                         GCCause::to_string(gc_cause));
      return false;
    }
  }
  
  void G1Policy::initiate_conc_mark() {
!   collector_state()->set_in_concurrent_start_gc(true);
    collector_state()->set_initiate_conc_mark_if_possible(false);
  }
  
  void G1Policy::decide_on_conc_mark_initiation() {
!   // We are about to decide on whether this pause will be a
!   // concurrent start pause.
!   // First, collector_state()->in_concurrent_start_gc() should not be already set. We
    // will set it here if we have to. However, it should be cleared by
!   // the end of the pause (it's only set for the duration of a
!   // concurrent start pause).
!   assert(!collector_state()->in_concurrent_start_gc(), "pre-condition");
  
    if (collector_state()->initiate_conc_mark_if_possible()) {
      // We had noticed on a previous pause that the heap occupancy has
      // gone over the initiating threshold and we should start a
      // concurrent marking cycle. Or we've been explicitly requested
*** 1057,1073 ****
    GCCause::Cause cause = _g1h->gc_cause();
    if ((cause != GCCause::_wb_breakpoint) &&
        ConcurrentGCBreakpoints::is_controlled()) {
      log_debug(gc, ergo)("Do not initiate concurrent cycle (whitebox controlled)");
    } else if (!about_to_start_mixed_phase() && collector_state()->in_young_only_phase()) {
!     // Initiate a new initial mark if there is no marking or reclamation going on.
      initiate_conc_mark();
      log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)");
    } else if (_g1h->is_user_requested_concurrent_full_gc(cause) ||
               (cause == GCCause::_wb_breakpoint)) {
!     // Initiate a user requested initial mark or run_to a breakpoint.
!     // An initial mark must be young only GC, so the collector state
      // must be updated to reflect this.
      collector_state()->set_in_young_only_phase(true);
      collector_state()->set_in_young_gc_before_mixed(false);
  
      // We might have ended up coming here about to start a mixed phase with a collection set
--- 1063,1079 ----
    GCCause::Cause cause = _g1h->gc_cause();
    if ((cause != GCCause::_wb_breakpoint) &&
        ConcurrentGCBreakpoints::is_controlled()) {
      log_debug(gc, ergo)("Do not initiate concurrent cycle (whitebox controlled)");
    } else if (!about_to_start_mixed_phase() && collector_state()->in_young_only_phase()) {
!     // Initiate a new concurrent start if there is no marking or reclamation going on.
      initiate_conc_mark();
      log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)");
    } else if (_g1h->is_user_requested_concurrent_full_gc(cause) ||
               (cause == GCCause::_wb_breakpoint)) {
!     // Initiate a user requested concurrent start or run to a breakpoint.
!     // A concurrent start must be young only GC, so the collector state
      // must be updated to reflect this.
      collector_state()->set_in_young_only_phase(true);
      collector_state()->set_in_young_gc_before_mixed(false);
  
      // We might have ended up coming here about to start a mixed phase with a collection set
*** 1143,1202 ****
      // this pause we decided to postpone it. That's OK.
      collector_state()->set_initiate_conc_mark_if_possible(true);
    }
  }
  
  G1Policy::PauseKind G1Policy::young_gc_pause_kind() const {
    assert(!collector_state()->in_full_gc(), "must be");
!   if (collector_state()->in_initial_mark_gc()) {
      assert(!collector_state()->in_young_gc_before_mixed(), "must be");
!     return InitialMarkGC;
    } else if (collector_state()->in_young_gc_before_mixed()) {
!     assert(!collector_state()->in_initial_mark_gc(), "must be");
      return LastYoungGC;
    } else if (collector_state()->in_mixed_phase()) {
!     assert(!collector_state()->in_initial_mark_gc(), "must be");
      assert(!collector_state()->in_young_gc_before_mixed(), "must be");
      return MixedGC;
    } else {
!     assert(!collector_state()->in_initial_mark_gc(), "must be");
      assert(!collector_state()->in_young_gc_before_mixed(), "must be");
      return YoungOnlyGC;
    }
  }
  
  void G1Policy::record_pause(PauseKind kind, double start, double end) {
    // Manage the MMU tracker. For some reason it ignores Full GCs.
    if (kind != FullGC) {
      _mmu_tracker->add_pause(start, end);
    }
  
!   // Manage the mutator time tracking from initial mark to first mixed gc.
    switch (kind) {
      case FullGC:
        abort_time_to_mixed_tracking();
        break;
      case Cleanup:
      case Remark:
      case YoungOnlyGC:
      case LastYoungGC:
!       _initial_mark_to_mixed.add_pause(end - start);
        break;
!     case InitialMarkGC:
        if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
!         _initial_mark_to_mixed.record_initial_mark_end(end);
        }
        break;
      case MixedGC:
!       _initial_mark_to_mixed.record_mixed_gc_start(start);
        break;
      default:
        ShouldNotReachHere();
    }
  }
  
  void G1Policy::abort_time_to_mixed_tracking() {
!   _initial_mark_to_mixed.reset();
  }
  
  bool G1Policy::next_gc_should_be_mixed(const char* true_action_str,
                                         const char* false_action_str) const {
    G1CollectionSetCandidates* candidates = _collection_set->candidates();
--- 1149,1223 ----
      // this pause we decided to postpone it. That's OK.
      collector_state()->set_initiate_conc_mark_if_possible(true);
    }
  }
  
+ bool G1Policy::is_young_only_pause(PauseKind kind) {
+   assert(kind != FullGC, "must be");
+   assert(kind != Remark, "must be");
+   assert(kind != Cleanup, "must be");
+   return kind == ConcurrentStartGC || kind == LastYoungGC || kind == YoungOnlyGC;
+ }
+ 
+ bool G1Policy::is_last_young_pause(PauseKind kind) {
+   return kind == LastYoungGC;
+ }
+ 
+ bool G1Policy::is_concurrent_start_pause(PauseKind kind) {
+   return kind == ConcurrentStartGC;
+ }
+ 
  G1Policy::PauseKind G1Policy::young_gc_pause_kind() const {
    assert(!collector_state()->in_full_gc(), "must be");
!   if (collector_state()->in_concurrent_start_gc()) {
      assert(!collector_state()->in_young_gc_before_mixed(), "must be");
!     return ConcurrentStartGC;
    } else if (collector_state()->in_young_gc_before_mixed()) {
!     assert(!collector_state()->in_concurrent_start_gc(), "must be");
      return LastYoungGC;
    } else if (collector_state()->in_mixed_phase()) {
!     assert(!collector_state()->in_concurrent_start_gc(), "must be");
      assert(!collector_state()->in_young_gc_before_mixed(), "must be");
      return MixedGC;
    } else {
!     assert(!collector_state()->in_concurrent_start_gc(), "must be");
      assert(!collector_state()->in_young_gc_before_mixed(), "must be");
      return YoungOnlyGC;
    }
  }
  
  void G1Policy::record_pause(PauseKind kind, double start, double end) {
    // Manage the MMU tracker. For some reason it ignores Full GCs.
    if (kind != FullGC) {
      _mmu_tracker->add_pause(start, end);
    }
  
!   // Manage the mutator time tracking from concurrent start to first mixed gc.
    switch (kind) {
      case FullGC:
        abort_time_to_mixed_tracking();
        break;
      case Cleanup:
      case Remark:
      case YoungOnlyGC:
      case LastYoungGC:
!       _concurrent_start_to_mixed.add_pause(end - start);
        break;
!     case ConcurrentStartGC:
        if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
!         _concurrent_start_to_mixed.record_concurrent_start_end(end);
        }
        break;
      case MixedGC:
!       _concurrent_start_to_mixed.record_mixed_gc_start(start);
        break;
      default:
        ShouldNotReachHere();
    }
  }
  
  void G1Policy::abort_time_to_mixed_tracking() {
!   _concurrent_start_to_mixed.reset();
  }
  
  bool G1Policy::next_gc_should_be_mixed(const char* true_action_str,
                                         const char* false_action_str) const {
    G1CollectionSetCandidates* candidates = _collection_set->candidates();
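The new predicates make the pause classification a pure function of the PauseKind computed once per pause, instead of repeated reads of mutable collector state. A self-contained toy distilling that pattern (the enumerator list and the driver below are simplified stand-ins, not the HotSpot definitions):

    #include <cassert>
    #include <cstdio>

    // Stand-in for G1Policy::PauseKind; the real enumerators live in g1Policy.hpp.
    enum PauseKind { FullGC, YoungOnlyGC, MixedGC, LastYoungGC, ConcurrentStartGC, Remark, Cleanup };

    // Mirrors the predicates added by the patch: young_gc_pause_kind() never
    // returns FullGC, Remark or Cleanup, so those kinds are asserted away.
    static bool is_young_only_pause(PauseKind kind) {
      assert(kind != FullGC && kind != Remark && kind != Cleanup);
      return kind == ConcurrentStartGC || kind == LastYoungGC || kind == YoungOnlyGC;
    }

    static bool is_concurrent_start_pause(PauseKind kind) {
      return kind == ConcurrentStartGC;
    }

    int main() {
      // A concurrent start pause still counts as young-only for the cost
      // predictors, but additionally kicks off the marking bookkeeping.
      PauseKind kind = ConcurrentStartGC;
      printf("young-only: %d, concurrent start: %d\n",
             is_young_only_pause(kind), is_concurrent_start_pause(kind));
      return 0;
    }

Because the predicates take the snapshot rather than querying collector_state() again, record_collection_pause_end() keeps reasoning about the pause that just happened even after the state flags have been advanced for the next phase.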