src/hotspot/share/gc/g1/g1Policy.cpp

rev 49511 : imported patch 8200234-g1concurrentmark-refactorings
rev 49522 : [mq]: 8035557-pause-time-predictions
rev 49523 : imported patch 8035557-pause-time-predictions-cleanup
rev 49525 : [mq]: 8200426-sangheon-review

*** 60,70 ****
    _reserve_regions(0),
    _rs_lengths_prediction(0),
    _bytes_allocated_in_old_since_last_gc(0),
    _initial_mark_to_mixed(),
    _collection_set(NULL),
!   _g1(NULL),
    _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
    _tenuring_threshold(MaxTenuringThreshold),
    _max_survivor_regions(0),
    _survivors_age_table(true),
    _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC) {
--- 60,70 ----
    _reserve_regions(0),
    _rs_lengths_prediction(0),
    _bytes_allocated_in_old_since_last_gc(0),
    _initial_mark_to_mixed(),
    _collection_set(NULL),
!   _g1h(NULL),
    _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
    _tenuring_threshold(MaxTenuringThreshold),
    _max_survivor_regions(0),
    _survivors_age_table(true),
    _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC) {
*** 72,95 ****
  G1Policy::~G1Policy() {
    delete _ihop_control;
  }

! G1CollectorState* G1Policy::collector_state() const { return _g1->collector_state(); }

  void G1Policy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {
!   _g1 = g1h;
    _collection_set = collection_set;

    assert(Heap_lock->owned_by_self(), "Locking discipline.");

    if (!adaptive_young_list_length()) {
      _young_list_fixed_length = _young_gen_sizer.min_desired_young_length();
    }
!   _young_gen_sizer.adjust_max_new_size(_g1->max_regions());

!   _free_regions_at_end_of_collection = _g1->num_free_regions();

    update_young_list_max_and_target_length();

    // We may immediately start allocating regions and placing them on the
    // collection set list. Initialize the per-collection set info
    _collection_set->start_incremental_building();
--- 72,95 ----
  G1Policy::~G1Policy() {
    delete _ihop_control;
  }

! G1CollectorState* G1Policy::collector_state() const { return _g1h->collector_state(); }

  void G1Policy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {
!   _g1h = g1h;
    _collection_set = collection_set;

    assert(Heap_lock->owned_by_self(), "Locking discipline.");

    if (!adaptive_young_list_length()) {
      _young_list_fixed_length = _young_gen_sizer.min_desired_young_length();
    }
!   _young_gen_sizer.adjust_max_new_size(_g1h->max_regions());

!   _free_regions_at_end_of_collection = _g1h->num_free_regions();

    update_young_list_max_and_target_length();

    // We may immediately start allocating regions and placing them on the
    // collection set list. Initialize the per-collection set info
    _collection_set->start_incremental_building();
*** 216,230 ****
    YoungTargetLengths result;

    // Calculate the absolute and desired min bounds first.

    // This is how many young regions we already have (currently: the survivors).
!   const uint base_min_length = _g1->survivor_regions_count();
    uint desired_min_length = calculate_young_list_desired_min_length(base_min_length);
    // This is the absolute minimum young length. Ensure that we
    // will at least have one eden region available for allocation.
!   uint absolute_min_length = base_min_length + MAX2(_g1->eden_regions_count(), (uint)1);
    // If we shrank the young list target it should not shrink below the current size.
    desired_min_length = MAX2(desired_min_length, absolute_min_length);
    // Calculate the absolute and desired max bounds.

    uint desired_max_length = calculate_young_list_desired_max_length();
--- 216,230 ----
    YoungTargetLengths result;

    // Calculate the absolute and desired min bounds first.

    // This is how many young regions we already have (currently: the survivors).
!   const uint base_min_length = _g1h->survivor_regions_count();
    uint desired_min_length = calculate_young_list_desired_min_length(base_min_length);
    // This is the absolute minimum young length. Ensure that we
    // will at least have one eden region available for allocation.
!   uint absolute_min_length = base_min_length + MAX2(_g1h->eden_regions_count(), (uint)1);
    // If we shrank the young list target it should not shrink below the current size.
    desired_min_length = MAX2(desired_min_length, absolute_min_length);
    // Calculate the absolute and desired max bounds.

    uint desired_max_length = calculate_young_list_desired_max_length();
*** 379,389 ****
    return base_min_length + min_young_length;
  }

  double G1Policy::predict_survivor_regions_evac_time() const {
    double survivor_regions_evac_time = 0.0;
!   const GrowableArray<HeapRegion*>* survivor_regions = _g1->survivor()->regions();

    for (GrowableArrayIterator<HeapRegion*> it = survivor_regions->begin();
         it != survivor_regions->end();
         ++it) {
      survivor_regions_evac_time += predict_region_elapsed_time_ms(*it, collector_state()->in_young_only_phase());
--- 379,389 ----
    return base_min_length + min_young_length;
  }

  double G1Policy::predict_survivor_regions_evac_time() const {
    double survivor_regions_evac_time = 0.0;
!   const GrowableArray<HeapRegion*>* survivor_regions = _g1h->survivor()->regions();

    for (GrowableArrayIterator<HeapRegion*> it = survivor_regions->begin();
         it != survivor_regions->end();
         ++it) {
      survivor_regions_evac_time += predict_region_elapsed_time_ms(*it, collector_state()->in_young_only_phase());
--- 379,389 ----
*** 442,452 ****
    collector_state()->set_clearing_next_bitmap(false);

    _short_lived_surv_rate_group->start_adding_regions();
    // also call this on any additional surv rate groups

!   _free_regions_at_end_of_collection = _g1->num_free_regions();

    // Reset survivors SurvRateGroup.
    _survivor_surv_rate_group->reset();
    update_young_list_max_and_target_length();
    update_rs_lengths_prediction();
--- 442,452 ----
    collector_state()->set_clearing_next_bitmap(false);

    _short_lived_surv_rate_group->start_adding_regions();
    // also call this on any additional surv rate groups

!   _free_regions_at_end_of_collection = _g1h->num_free_regions();

    // Reset survivors SurvRateGroup.
    _survivor_surv_rate_group->reset();
    update_young_list_max_and_target_length();
    update_rs_lengths_prediction();
*** 459,483 ****
    // We only need to do this here as the policy will only be applied
    // to the GC we're about to start. so, no point is calculating this
    // every time we calculate / recalculate the target young length.
    update_survivors_policy();

!   assert(_g1->used() == _g1->recalculate_used(),
           "sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT,
!          _g1->used(), _g1->recalculate_used());

    phase_times()->record_cur_collection_start_sec(start_time_sec);
!   _pending_cards = _g1->pending_card_num();

    _collection_set->reset_bytes_used_before();
    _bytes_copied_during_gc = 0;

    // do that for any other surv rate groups
    _short_lived_surv_rate_group->stop_adding_regions();
    _survivors_age_table.clear();

!   assert(_g1->collection_set()->verify_young_ages(), "region age verification failed");
  }

  void G1Policy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {
    assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
    collector_state()->set_in_initial_mark_gc(false);
--- 459,483 ----
    // We only need to do this here as the policy will only be applied
    // to the GC we're about to start. so, no point is calculating this
    // every time we calculate / recalculate the target young length.
    update_survivors_policy();

!   assert(_g1h->used() == _g1h->recalculate_used(),
           "sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT,
!          _g1h->used(), _g1h->recalculate_used());

    phase_times()->record_cur_collection_start_sec(start_time_sec);
!   _pending_cards = _g1h->pending_card_num();

    _collection_set->reset_bytes_used_before();
    _bytes_copied_during_gc = 0;

    // do that for any other surv rate groups
    _short_lived_surv_rate_group->stop_adding_regions();
    _survivors_age_table.clear();

!   assert(_g1h->collection_set()->verify_young_ages(), "region age verification failed");
  }

  void G1Policy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {
    assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
    collector_state()->set_in_initial_mark_gc(false);
*** 525,554 ****
  CollectionSetChooser* G1Policy::cset_chooser() const {
    return _collection_set->cset_chooser();
  }

  bool G1Policy::about_to_start_mixed_phase() const {
!   return _g1->concurrent_mark()->cm_thread()->during_cycle() || collector_state()->in_young_gc_before_mixed();
  }

  bool G1Policy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
    if (about_to_start_mixed_phase()) {
      return false;
    }

    size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold();

!   size_t cur_used_bytes = _g1->non_young_capacity_bytes();
    size_t alloc_byte_size = alloc_word_size * HeapWordSize;
    size_t marking_request_bytes = cur_used_bytes + alloc_byte_size;

    bool result = false;
    if (marking_request_bytes > marking_initiating_used_threshold) {
      result = collector_state()->in_young_only_phase() && !collector_state()->in_young_gc_before_mixed();
      log_debug(gc, ergo, ihop)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s",
                                result ? "Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)",
!                               cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1->capacity() * 100, source);
    }

    return result;
  }
--- 525,554 ----
  CollectionSetChooser* G1Policy::cset_chooser() const {
    return _collection_set->cset_chooser();
  }

  bool G1Policy::about_to_start_mixed_phase() const {
!   return _g1h->concurrent_mark()->cm_thread()->during_cycle() || collector_state()->in_young_gc_before_mixed();
  }

  bool G1Policy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
    if (about_to_start_mixed_phase()) {
      return false;
    }

    size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold();

!   size_t cur_used_bytes = _g1h->non_young_capacity_bytes();
    size_t alloc_byte_size = alloc_word_size * HeapWordSize;
    size_t marking_request_bytes = cur_used_bytes + alloc_byte_size;

    bool result = false;
    if (marking_request_bytes > marking_initiating_used_threshold) {
      result = collector_state()->in_young_only_phase() && !collector_state()->in_young_gc_before_mixed();
      log_debug(gc, ergo, ihop)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s",
                                result ? "Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)",
!                               cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1h->capacity() * 100, source);
    }

    return result;
  }
*** 556,571 ****
  #define MIN_TIMER_GRANULARITY 0.0000001

  void G1Policy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) {
    double end_time_sec = os::elapsedTime();

!   size_t cur_used_bytes = _g1->used();
!   assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
    bool this_pause_included_initial_mark = false;
    bool this_pause_was_young_only = collector_state()->in_young_only_phase();

!   bool update_stats = !_g1->evacuation_failed();

    record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);

    _collection_pause_end_millis = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
--- 556,571 ----
  #define MIN_TIMER_GRANULARITY 0.0000001

  void G1Policy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) {
    double end_time_sec = os::elapsedTime();

!   size_t cur_used_bytes = _g1h->used();
!   assert(cur_used_bytes == _g1h->recalculate_used(), "It should!");
    bool this_pause_included_initial_mark = false;
    bool this_pause_was_young_only = collector_state()->in_young_only_phase();

!   bool update_stats = !_g1h->evacuation_failed();

    record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);

    _collection_pause_end_millis = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
*** 701,711 ****
           "If the last pause has been an initial mark, we should not have been in the marking window");
    if (this_pause_included_initial_mark) {
      collector_state()->set_mark_or_rebuild_in_progress(true);
    }

!   _free_regions_at_end_of_collection = _g1->num_free_regions();
    // IHOP control wants to know the expected young gen length if it were not
    // restrained by the heap reserve. Using the actual length would make the
    // prediction too small and the limit the young gen every time we get to the
    // predicted target occupancy.
    size_t last_unrestrained_young_length = update_young_list_max_and_target_length();
--- 701,711 ----
           "If the last pause has been an initial mark, we should not have been in the marking window");
    if (this_pause_included_initial_mark) {
      collector_state()->set_mark_or_rebuild_in_progress(true);
    }

!   _free_regions_at_end_of_collection = _g1h->num_free_regions();
    // IHOP control wants to know the expected young gen length if it were not
    // restrained by the heap reserve. Using the actual length would make the
    // prediction too small and the limit the young gen every time we get to the
    // predicted target occupancy.
    size_t last_unrestrained_young_length = update_young_list_max_and_target_length();
*** 715,725 ****
                           _bytes_allocated_in_old_since_last_gc,
                           last_unrestrained_young_length * HeapRegion::GrainBytes,
                           this_pause_was_young_only);
    _bytes_allocated_in_old_since_last_gc = 0;

!   _ihop_control->send_trace_event(_g1->gc_tracer_stw());

    // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
    double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS *
                                    G1RSetUpdatingPauseTimePercent / 100.0;
    if (update_rs_time_goal_ms < scan_hcc_time_ms) {
--- 715,725 ----
                           _bytes_allocated_in_old_since_last_gc,
                           last_unrestrained_young_length * HeapRegion::GrainBytes,
                           this_pause_was_young_only);
    _bytes_allocated_in_old_since_last_gc = 0;

!   _ihop_control->send_trace_event(_g1h->gc_tracer_stw());

    // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
    double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS *
                                    G1RSetUpdatingPauseTimePercent / 100.0;
    if (update_rs_time_goal_ms < scan_hcc_time_ms) {
*** 729,739 ****
      update_rs_time_goal_ms = 0;
    } else {
      update_rs_time_goal_ms -= scan_hcc_time_ms;
    }

!   _g1->concurrent_refine()->adjust(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
                                     phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
                                     update_rs_time_goal_ms);

    cset_chooser()->verify();
  }
--- 729,739 ----
      update_rs_time_goal_ms = 0;
    } else {
      update_rs_time_goal_ms -= scan_hcc_time_ms;
    }

!   _g1h->concurrent_refine()->adjust(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
                                     phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
                                     update_rs_time_goal_ms);

    cset_chooser()->verify();
  }
*** 858,874 ****
    }
    return region_elapsed_time_ms;
  }

  bool G1Policy::should_allocate_mutator_region() const {
!   uint young_list_length = _g1->young_regions_count();
    uint young_list_target_length = _young_list_target_length;
    return young_list_length < young_list_target_length;
  }

  bool G1Policy::can_expand_young_list() const {
!   uint young_list_length = _g1->young_regions_count();
    uint young_list_max_length = _young_list_max_length;
    return young_list_length < young_list_max_length;
  }

  bool G1Policy::adaptive_young_list_length() const {
--- 858,874 ----
    }
    return region_elapsed_time_ms;
  }

  bool G1Policy::should_allocate_mutator_region() const {
!   uint young_list_length = _g1h->young_regions_count();
    uint young_list_target_length = _young_list_target_length;
    return young_list_length < young_list_target_length;
  }

  bool G1Policy::can_expand_young_list() const {
!   uint young_list_length = _g1h->young_regions_count();
    uint young_list_max_length = _young_list_max_length;
    return young_list_length < young_list_max_length;
  }

  bool G1Policy::adaptive_young_list_length() const {
*** 916,926 ****
  bool G1Policy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) {
    // We actually check whether we are marking here and not if we are in a
    // reclamation phase. This means that we will schedule a concurrent mark
    // even while we are still in the process of reclaiming memory.
!   bool during_cycle = _g1->concurrent_mark()->cm_thread()->during_cycle();
    if (!during_cycle) {
      log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause));
      collector_state()->set_initiate_conc_mark_if_possible(true);
      return true;
    } else {
--- 916,926 ----
  bool G1Policy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) {
    // We actually check whether we are marking here and not if we are in a
    // reclamation phase. This means that we will schedule a concurrent mark
    // even while we are still in the process of reclaiming memory.
!   bool during_cycle = _g1h->concurrent_mark()->cm_thread()->during_cycle();
    if (!during_cycle) {
      log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause));
      collector_state()->set_initiate_conc_mark_if_possible(true);
      return true;
    } else {
*** 951,961 ****
      if (!about_to_start_mixed_phase() && collector_state()->in_young_only_phase()) {
        // Initiate a new initial mark if there is no marking or reclamation going on.
        initiate_conc_mark();
        log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)");
!     } else if (_g1->is_user_requested_concurrent_full_gc(_g1->gc_cause())) {
        // Initiate a user requested initial mark. An initial mark must be young only
        // GC, so the collector state must be updated to reflect this.
        collector_state()->set_in_young_only_phase(true);
        collector_state()->set_in_young_gc_before_mixed(false);
--- 951,961 ----
      if (!about_to_start_mixed_phase() && collector_state()->in_young_only_phase()) {
        // Initiate a new initial mark if there is no marking or reclamation going on.
        initiate_conc_mark();
        log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)");
!     } else if (_g1h->is_user_requested_concurrent_full_gc(_g1h->gc_cause())) {
        // Initiate a user requested initial mark. An initial mark must be young only
        // GC, so the collector state must be updated to reflect this.
        collector_state()->set_in_young_only_phase(true);
        collector_state()->set_in_young_gc_before_mixed(false);
*** 984,994 ****
      }
    }
  }

  void G1Policy::record_concurrent_mark_cleanup_end() {
!   cset_chooser()->rebuild(_g1->workers(), _g1->num_regions());

    bool mixed_gc_pending = next_gc_should_be_mixed("request mixed gcs", "request young-only gcs");
    if (!mixed_gc_pending) {
      clear_collection_set_candidates();
      abort_time_to_mixed_tracking();
--- 984,994 ----
      }
    }
  }

  void G1Policy::record_concurrent_mark_cleanup_end() {
!   cset_chooser()->rebuild(_g1h->workers(), _g1h->num_regions());

    bool mixed_gc_pending = next_gc_should_be_mixed("request mixed gcs", "request young-only gcs");
    if (!mixed_gc_pending) {
      clear_collection_set_candidates();
      abort_time_to_mixed_tracking();
*** 1003,1013 ****
    record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
  }

  double G1Policy::reclaimable_bytes_percent(size_t reclaimable_bytes) const {
!   return percent_of(reclaimable_bytes, _g1->capacity());
  }

  class G1ClearCollectionSetCandidateRemSets : public HeapRegionClosure {
    virtual bool do_heap_region(HeapRegion* r) {
      r->rem_set()->clear_locked(true /* only_cardset */);
--- 1003,1013 ----
    record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
  }

  double G1Policy::reclaimable_bytes_percent(size_t reclaimable_bytes) const {
!   return percent_of(reclaimable_bytes, _g1h->capacity());
  }

  class G1ClearCollectionSetCandidateRemSets : public HeapRegionClosure {
    virtual bool do_heap_region(HeapRegion* r) {
      r->rem_set()->clear_locked(true /* only_cardset */);