
src/hotspot/share/gc/g1/g1Policy.cpp

rev 57223 : imported patch 8225484-changes-to-survivor-calculation

*** 61,71 ****
    _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC),
    _young_list_target_length(0),
    _young_list_fixed_length(0),
    _young_list_max_length(0),
    _eden_surv_rate_group(new G1SurvRateGroup()),
!   _survivor_surv_rate_group(new G1SurvRateGroup()),
    _reserve_factor((double) G1ReservePercent / 100.0),
    _reserve_regions(0),
    _young_gen_sizer(G1YoungGenSizer::create_gen_sizer()),
    _free_regions_at_end_of_collection(0),
    _rs_length(0),
--- 61,72 ----
    _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC),
    _young_list_target_length(0),
    _young_list_fixed_length(0),
    _young_list_max_length(0),
    _eden_surv_rate_group(new G1SurvRateGroup()),
!   _survivor_used_bytes_at_start(0),
!   _survivor_used_bytes_at_end(0),
    _reserve_factor((double) G1ReservePercent / 100.0),
    _reserve_regions(0),
    _young_gen_sizer(G1YoungGenSizer::create_gen_sizer()),
    _free_regions_at_end_of_collection(0),
    _rs_length(0),
*** 82,92 ****
    _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
    _mark_remark_start_sec(0),
    _mark_cleanup_start_sec(0),
    _tenuring_threshold(MaxTenuringThreshold),
    _max_survivor_regions(0),
!   _survivors_age_table(true)
  {
  }
  
  G1Policy::~G1Policy() {
    delete _ihop_control;
--- 83,93 ----
    _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
    _mark_remark_start_sec(0),
    _mark_cleanup_start_sec(0),
    _tenuring_threshold(MaxTenuringThreshold),
    _max_survivor_regions(0),
!   _surviving_survivor_words(0)
  {
  }
  
  G1Policy::~G1Policy() {
    delete _ihop_control;
*** 396,405 ****
--- 397,409 ----
    double survivor_regions_evac_time = 0.0;
    const GrowableArray<HeapRegion*>* survivor_regions = _g1h->survivor()->regions();
  
    for (GrowableArrayIterator<HeapRegion*> it = survivor_regions->begin();
         it != survivor_regions->end();
         ++it) {
+     // We could split out copy_time from total time here and calculate it based on
+     // the number of survivor regions. Since we need to iterate over the regions
+     // for the non_copy time anyway, keep it.
      survivor_regions_evac_time += predict_region_total_time_ms(*it, collector_state()->in_young_only_phase());
    }
    return survivor_regions_evac_time;
  }
*** 453,466 ****
    collector_state()->set_in_initial_mark_gc(false);
    collector_state()->set_mark_or_rebuild_in_progress(false);
    collector_state()->set_clearing_next_bitmap(false);
  
    _eden_surv_rate_group->start_adding_regions();
-   // also call this on any additional surv rate groups
  
    _free_regions_at_end_of_collection = _g1h->num_free_regions();
-   _survivor_surv_rate_group->reset();
    update_young_list_max_and_target_length();
    update_rs_length_prediction();
  
    _pending_cards_at_prev_gc_end = _g1h->pending_card_num();
    _bytes_allocated_in_old_since_last_gc = 0;
--- 457,468 ----
*** 525,534 ****
--- 527,538 ----
  void G1Policy::record_collection_pause_start(double start_time_sec) {
    // We only need to do this here as the policy will only be applied
    // to the GC we're about to start. so, no point is calculating this
    // every time we calculate / recalculate the target young length.
    update_survivors_policy();
+   _survivor_used_bytes_at_start = _g1h->survivor()->used_bytes();
+   _survivor_used_bytes_at_end = 0;
  
    assert(max_survivor_regions() + _g1h->num_used_regions() <= _g1h->max_regions(),
           "Maximum survivor regions %u plus used regions %u exceeds max regions %u",
           max_survivor_regions(), _g1h->num_used_regions(), _g1h->max_regions());
    assert_used_and_recalculate_used_equal(_g1h);
*** 540,549 ****
--- 544,554 ----
    _collection_set->reset_bytes_used_before();
  
    // do that for any other surv rate groups
    _eden_surv_rate_group->stop_adding_regions();
    _survivors_age_table.clear();
+   _surviving_survivor_words = 0;
  
    assert(_g1h->collection_set()->verify_young_ages(), "region age verification failed");
  }
  
  void G1Policy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {
*** 650,659 ****
--- 655,675 ----
      record_concurrent_mark_init_end(0.0);
    } else {
      maybe_start_marking();
    }
  
+   size_t survived = _surviving_survivor_words * HeapWordSize;
+ 
+   if (_survivor_used_bytes_at_start != 0) {
+     double ratio = (double)survived / _survivor_used_bytes_at_start;
+     guarantee(ratio >= 0.0 && ratio <= 1.0, "ratio %.3lf", ratio);
+     _analytics->report_survivor_ratio(ratio);
+   } else {
+     _analytics->report_survivor_ratio(0.0f);
+   }
+   _survivor_used_bytes_at_end = survived;
+ 
    double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _analytics->prev_collection_pause_end_ms());
    if (app_time_ms < MIN_TIMER_GRANULARITY) {
      // This usually happens due to the timer not having the required
      // granularity. Some Linuxes are the usual culprits.
      // We'll just set it to something (arbitrarily) small.
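
Note on the hunk above: the ratio handed to the analytics is simply the bytes that survived out of the survivor regions divided by the survivor bytes present at pause start; when there were no survivor bytes at pause start, 0.0 is reported instead of dividing by zero. A minimal standalone sketch of that bookkeeping, using hypothetical plain types in place of the G1 policy fields (SurvivorStats and survivor_ratio are illustrative names, not G1 code):

#include <cassert>
#include <cstddef>

// Hypothetical stand-in for the policy bookkeeping: words copied out of
// survivor regions are accumulated during the pause, then turned into a
// ratio against the survivor bytes that were live at pause start.
struct SurvivorStats {
  size_t used_bytes_at_start = 0;   // corresponds to _survivor_used_bytes_at_start
  size_t surviving_words     = 0;   // corresponds to _surviving_survivor_words
};

// Returns the ratio that would be reported; 0.0 when there were no survivors.
static double survivor_ratio(const SurvivorStats& s, size_t heap_word_size = 8) {
  size_t survived = s.surviving_words * heap_word_size;
  if (s.used_bytes_at_start == 0) {
    return 0.0;
  }
  double ratio = (double)survived / s.used_bytes_at_start;
  assert(ratio >= 0.0 && ratio <= 1.0);
  return ratio;
}

int main() {
  SurvivorStats s;
  s.used_bytes_at_start = 4 * 1024 * 1024; // 4M of survivor space at pause start
  s.surviving_words = 131072;              // 1M survived, assuming 8-byte words
  return survivor_ratio(s) == 0.25 ? 0 : 1; // expect a 25% survivor-to-survivor ratio
}
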
*** 912,926 ****
    size_t rs_length = _analytics->predict_rs_length();
    return predict_base_elapsed_time_ms(pending_cards, rs_length);
  }
  
  size_t G1Policy::predict_bytes_to_copy(HeapRegion* hr) const {
!   size_t bytes_to_copy;
    if (!hr->is_young()) {
      bytes_to_copy = hr->max_live_bytes();
    } else {
!     bytes_to_copy = (size_t) (hr->used() * hr->surv_rate_prediction(_predictor));
    }
    return bytes_to_copy;
  }
  
  double G1Policy::predict_eden_copy_time_ms(uint count, size_t* bytes_to_copy) const {
--- 928,957 ----
    size_t rs_length = _analytics->predict_rs_length();
    return predict_base_elapsed_time_ms(pending_cards, rs_length);
  }
  
  size_t G1Policy::predict_bytes_to_copy(HeapRegion* hr) const {
!   size_t bytes_to_copy = 0;
    if (!hr->is_young()) {
      bytes_to_copy = hr->max_live_bytes();
    } else {
!     size_t survived = hr->survivor_bytes();
! 
!     guarantee(hr->used() >= survived, "Used " SIZE_FORMAT " >= survived " SIZE_FORMAT, hr->used(), survived);
! 
!     if (survived > 0) {
!       bytes_to_copy += survived * _analytics->predict_survivor_ratio();
!     }
! 
!     if (hr->used() > survived) {
!       bytes_to_copy += (size_t) ((hr->used() - survived) * hr->surv_rate_prediction(_predictor));
!     }
!     /*
!     log_debug(gc)("straggler region %u type %s old survived " SIZE_FORMAT " exp survived " SIZE_FORMAT " eden used " SIZE_FORMAT " exp eden survived " SIZE_FORMAT " total " SIZE_FORMAT,
!                   hr->hrm_index(), hr->get_short_type_str(), survived, (size_t)(survived * _analytics->predict_survivor_ratio()), hr->used() - survived,
!                   ((hr->used() - survived) > 0) ? (size_t)((hr->used() - survived) * hr->surv_rate_prediction(_predictor)) : 0, bytes_to_copy);
!     */
    }
    return bytes_to_copy;
  }
  
  double G1Policy::predict_eden_copy_time_ms(uint count, size_t* bytes_to_copy) const {
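
Note on the hunk above: for a young region the new prediction is split into the portion that was already survivor data (scaled by the predicted survivor-to-survivor ratio from the analytics) and the newly allocated remainder (scaled by the region's age-based survival rate prediction). A standalone sketch of that arithmetic with hypothetical inputs (the function name and parameters below are illustrative, not G1 API):

#include <cstddef>

// Sketch of the split prediction for a young region: bytes that already sat
// in the region as survivor objects are scaled by the predicted
// survivor-to-survivor ratio, the remaining (newly allocated) bytes by the
// region's age-based survival rate prediction.
static size_t predict_young_bytes_to_copy(size_t used_bytes,
                                          size_t survivor_bytes,
                                          double predicted_survivor_ratio,
                                          double surv_rate_prediction) {
  size_t bytes_to_copy = 0;
  if (survivor_bytes > 0) {
    bytes_to_copy += (size_t)(survivor_bytes * predicted_survivor_ratio);
  }
  if (used_bytes > survivor_bytes) {
    bytes_to_copy += (size_t)((used_bytes - survivor_bytes) * surv_rate_prediction);
  }
  return bytes_to_copy;
}

int main() {
  // E.g. a 1 MB region holding 256 KB of survivor objects: with a 0.5 survivor
  // ratio and a 0.25 survival rate for the remaining 768 KB, roughly
  // 128 KB + 192 KB = 320 KB is predicted to be copied.
  size_t predicted = predict_young_bytes_to_copy(1024 * 1024, 256 * 1024, 0.5, 0.25);
  return predicted == 320 * 1024 ? 0 : 1;
}
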
*** 1386,1412 ****
    log_debug(gc, ergo, cset)("Prepared %u regions out of %u for optional evacuation. Predicted time: %.3fms",
                              num_optional_regions, max_optional_regions, prediction_ms);
  }
  
  void G1Policy::transfer_survivors_to_cset(const G1SurvivorRegions* survivors) {
-   note_start_adding_survivor_regions();
- 
    HeapRegion* last = NULL;
    for (GrowableArrayIterator<HeapRegion*> it = survivors->regions()->begin();
         it != survivors->regions()->end();
         ++it) {
      HeapRegion* curr = *it;
-     set_region_survivor(curr);
  
      // The region is a non-empty survivor so let's add it to
      // the incremental collection set for the next evacuation
      // pause.
      _collection_set->add_survivor_regions(curr);
  
      last = curr;
    }
-   note_stop_adding_survivor_regions();
  
    // Don't clear the survivor list handles until the start of
    // the next evacuation pause - we need it in order to re-tag
    // the survivor regions from this evacuation pause as 'young'
    // at the start of the next.
--- 1417,1439 ----