< prev index next >

src/hotspot/share/gc/g1/g1Policy.cpp

Print this page
rev 59792 : imported patch 8244603-sjohanss-review


  45 #include "gc/shared/concurrentGCBreakpoints.hpp"
  46 #include "gc/shared/gcPolicyCounters.hpp"
  47 #include "logging/log.hpp"
  48 #include "runtime/arguments.hpp"
  49 #include "runtime/globals.hpp"
  50 #include "runtime/java.hpp"
  51 #include "runtime/mutexLocker.hpp"
  52 #include "utilities/debug.hpp"
  53 #include "utilities/growableArray.hpp"
  54 #include "utilities/pair.hpp"
  55 
  56 G1Policy::G1Policy(STWGCTimer* gc_timer) :
  57   _predictor(G1ConfidencePercent / 100.0),
  58   _analytics(new G1Analytics(&_predictor)),
  59   _remset_tracker(),
  60   _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
  61   _ihop_control(create_ihop_control(&_predictor)),
  62   _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
  63   _full_collection_start_sec(0.0),
  64   _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC),

  65   _young_list_target_length(0),
  66   _young_list_max_length(0),
  67   _eden_surv_rate_group(new G1SurvRateGroup()),
  68   _survivor_surv_rate_group(new G1SurvRateGroup()),
  69   _reserve_factor((double) G1ReservePercent / 100.0),
  70   _reserve_regions(0),
  71   _young_gen_sizer(G1YoungGenSizer::create_gen_sizer()),
  72   _free_regions_at_end_of_collection(0),
  73   _rs_length(0),
  74   _rs_length_prediction(0),
  75   _pending_cards_at_gc_start(0),
  76   _old_gen_alloc_tracker(),
  77   _initial_mark_to_mixed(),
  78   _collection_set(NULL),
  79   _g1h(NULL),
  80   _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
  81   _mark_remark_start_sec(0),
  82   _mark_cleanup_start_sec(0),
  83   _tenuring_threshold(MaxTenuringThreshold),
  84   _max_survivor_regions(0),


  94 G1Policy* G1Policy::create_policy(STWGCTimer* gc_timer_stw) {
  95   if (G1Arguments::is_heterogeneous_heap()) {
  96     return new G1HeterogeneousHeapPolicy(gc_timer_stw);
  97   } else {
  98     return new G1Policy(gc_timer_stw);
  99   }
 100 }
 101 
 102 G1CollectorState* G1Policy::collector_state() const { return _g1h->collector_state(); }
 103 
 104 void G1Policy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {
 105   _g1h = g1h;
 106   _collection_set = collection_set;
 107 
 108   assert(Heap_lock->owned_by_self(), "Locking discipline.");
 109 
 110   _young_gen_sizer->adjust_max_new_size(_g1h->max_expandable_regions());
 111 
 112   _free_regions_at_end_of_collection = _g1h->num_free_regions();
 113 
 114   update_young_max_and_target_length();
 115   // We may immediately start allocating regions and placing them on the
 116   // collection set list. Initialize the per-collection set info
 117   _collection_set->start_incremental_building();
 118 }
 119 
 120 void G1Policy::note_gc_start() {
 121   phase_times()->note_gc_start();
 122 }
 123 
 124 class G1YoungLengthPredictor {
 125   const double _base_time_ms;
 126   const double _base_free_regions;
 127   const double _target_pause_time_ms;
 128   const G1Policy* const _policy;
 129 
 130  public:
 131   G1YoungLengthPredictor(double base_time_ms,
 132                          double base_free_regions,
 133                          double target_pause_time_ms,
 134                          const G1Policy* policy) :


 183 
 184   _young_gen_sizer->heap_size_changed(new_number_of_regions);
 185 
 186   _ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes);
 187 }
 188 
 189 uint G1Policy::calculate_desired_eden_length_by_mmu() const {
 190   // One could argue that any useful eden length to keep any MMU would be 1, but
 191   // in theory this is possible. Other constraints enforce a minimum eden of 1
 192   // anyway.
 193   uint desired_min_length = 0;
 194   if (use_adaptive_young_list_length()) {
 195     double now_sec = os::elapsedTime();
 196     double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
 197     double alloc_rate_ms = _analytics->predict_alloc_rate_ms();
 198     desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
 199   }
 200   return desired_min_length;
 201 }
 202 
 203 uint G1Policy::update_young_max_and_target_length() {
 204   return update_young_max_and_target_length(_analytics->predict_rs_length());
 205 }
 206 
 207 uint G1Policy::update_young_max_and_target_length(size_t rs_length) {
 208   uint unbounded_target_length = update_young_target_length(rs_length);
 209   update_max_gc_locker_expansion();
 210   return unbounded_target_length;
 211 }
 212 
 213 uint G1Policy::update_young_target_length(size_t rs_length) {
 214   uint desired_length = calculate_young_desired_length(rs_length);
 215   _young_list_target_length = calculate_young_target_length(desired_length);
 216 
 217   log_debug(gc,ergo,heap)("Young target lengths: desired: %u target: %u",
 218                           desired_length, _young_list_target_length);
 219   return desired_length;
 220 }
 221 
 222 // Calculates desired young gen length. It is calculated from:
 223 //
 224 // - sizer min/max bounds on young gen
 225 // - pause time goal for whole young gen evacuation
 226 // - MMU goal influencing eden to make GCs spaced apart.
 227 // - a minimum one eden region length.
 228 //






 229 uint G1Policy::calculate_young_desired_length(size_t rs_length) const {
 230   uint min_young_length_by_sizer = _young_gen_sizer->min_desired_young_length();
 231   uint max_young_length_by_sizer = _young_gen_sizer->max_desired_young_length();
 232 
 233   assert(min_young_length_by_sizer >= 1, "invariant");
 234   assert(max_young_length_by_sizer >= min_young_length_by_sizer, "invariant");
 235 
 236   // Absolute minimum eden length. See above why.
 237   // Enforcing a minimum eden length helps at startup when the predictors are not
 238   // yet trained on the application to avoid unnecessary (but very short) full gcs
 239   // on very small (initial) heaps.
 240   uint const MinDesiredEdenLength = 1;
 241 
 242   // Calculate the absolute and desired min bounds first.
 243 
 244   // This is how many survivor regions we already have.
 245   const uint survivor_length = _g1h->survivor_regions_count();
 246   // Size of the already allocated young gen.
 247   const uint allocated_young_length = _g1h->young_regions_count();
 248   // This is the absolute minimum young length that we can return. Ensure that we
 249   // don't go below any user-defined minimum bound; but we might have already
 250   // allocated more than that for reasons. In this case, use that.
 251   uint absolute_min_young_length = MAX2(allocated_young_length, min_young_length_by_sizer);
 252   // Calculate the absolute max bounds. After evac failure or when revising the
 253   // young length we might have exceeded absolute min length or absolute_max_length,
 254   // so adjust the result accordingly.
 255   uint absolute_max_young_length = MAX2(max_young_length_by_sizer, absolute_min_young_length);
 256 


 501 }
 502 
 503 double G1Policy::predict_survivor_regions_evac_time() const {
 504   double survivor_regions_evac_time = 0.0;
 505   const GrowableArray<HeapRegion*>* survivor_regions = _g1h->survivor()->regions();
 506   for (GrowableArrayIterator<HeapRegion*> it = survivor_regions->begin();
 507        it != survivor_regions->end();
 508        ++it) {
 509     survivor_regions_evac_time += predict_region_total_time_ms(*it, collector_state()->in_young_only_phase());
 510   }
 511   return survivor_regions_evac_time;
 512 }
 513 
 514 void G1Policy::revise_young_list_target_length_if_necessary(size_t rs_length) {
 515   guarantee(use_adaptive_young_list_length(), "should not call this otherwise" );
 516 
 517   if (rs_length > _rs_length_prediction) {
 518     // add 10% to avoid having to recalculate often
 519     size_t rs_length_prediction = rs_length * 1100 / 1000;
 520     update_rs_length_prediction(rs_length_prediction);
 521 
 522     update_young_max_and_target_length(rs_length_prediction);
 523   }
 524 }
 525 
 526 void G1Policy::update_rs_length_prediction() {
 527   update_rs_length_prediction(_analytics->predict_rs_length());
 528 }
 529 
 530 void G1Policy::update_rs_length_prediction(size_t prediction) {
 531   if (collector_state()->in_young_only_phase() && use_adaptive_young_list_length()) {
 532     _rs_length_prediction = prediction;
 533   }
 534 }
 535 
 536 void G1Policy::record_full_collection_start() {
 537   _full_collection_start_sec = os::elapsedTime();
 538   // Release the future to-space so that it is available for compaction into.
 539   collector_state()->set_in_young_only_phase(false);
 540   collector_state()->set_in_full_gc(true);
 541   _collection_set->clear_candidates();
 542   _pending_cards_at_gc_start = 0;


 550   double full_gc_time_ms = full_gc_time_sec * 1000.0;
 551 
 552   _analytics->update_recent_gc_times(end_sec, full_gc_time_ms);
 553 
 554   collector_state()->set_in_full_gc(false);
 555 
 556   // "Nuke" the heuristics that control the young/mixed GC
 557   // transitions and make sure we start with young GCs after the Full GC.
 558   collector_state()->set_in_young_only_phase(true);
 559   collector_state()->set_in_young_gc_before_mixed(false);
 560   collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
 561   collector_state()->set_in_initial_mark_gc(false);
 562   collector_state()->set_mark_or_rebuild_in_progress(false);
 563   collector_state()->set_clearing_next_bitmap(false);
 564 
 565   _eden_surv_rate_group->start_adding_regions();
 566   // also call this on any additional surv rate groups
 567 
 568   _free_regions_at_end_of_collection = _g1h->num_free_regions();
 569   _survivor_surv_rate_group->reset();
 570   update_young_max_and_target_length();
 571   update_rs_length_prediction();
 572 
 573   _old_gen_alloc_tracker.reset_after_full_gc();
 574 
 575   record_pause(FullGC, _full_collection_start_sec, end_sec);
 576 }
 577 
 578 static void log_refinement_stats(const char* kind, const G1ConcurrentRefineStats& stats) {
 579   log_debug(gc, refine, stats)
 580            ("%s refinement: %.2fms, refined: " SIZE_FORMAT
 581             ", precleaned: " SIZE_FORMAT ", dirtied: " SIZE_FORMAT,
 582             kind,
 583             stats.refinement_time().seconds() * MILLIUNITS,
 584             stats.refined_cards(),
 585             stats.precleaned_cards(),
 586             stats.dirtied_cards());
 587 }
 588 
 589 void G1Policy::record_concurrent_refinement_stats() {
 590   G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();


 880     // During mixed gc we do not use them for young gen sizing.
 881     if (this_pause_was_young_only) {
 882       _analytics->report_pending_cards((double) _pending_cards_at_gc_start);
 883       _analytics->report_rs_length((double) _rs_length);
 884     }
 885   }
 886 
 887   assert(!(this_pause_included_initial_mark && collector_state()->mark_or_rebuild_in_progress()),
 888          "If the last pause has been an initial mark, we should not have been in the marking window");
 889   if (this_pause_included_initial_mark) {
 890     collector_state()->set_mark_or_rebuild_in_progress(true);
 891   }
 892 
 893   _free_regions_at_end_of_collection = _g1h->num_free_regions();
 894 
 895   update_rs_length_prediction();
 896 
 897   // Do not update dynamic IHOP due to G1 periodic collection as it is highly likely
 898   // that in this case we are not running in a "normal" operating mode.
 899   if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
 900     // IHOP control wants to know the expected young gen length if it were not
 901     // restrained by the heap reserve. Using the actual length would make the
  902     // prediction too small and then limit the young gen every time we get to the
 903     // predicted target occupancy.
 904     size_t last_unrestrained_young_length = update_young_max_and_target_length();
 905 
 906     _old_gen_alloc_tracker.reset_after_young_gc(app_time_ms / 1000.0);
 907     update_ihop_prediction(_old_gen_alloc_tracker.last_cycle_duration(),
 908                            _old_gen_alloc_tracker.last_cycle_old_bytes(),
 909                            last_unrestrained_young_length * HeapRegion::GrainBytes,
 910                            this_pause_was_young_only);
 911 
 912     _ihop_control->send_trace_event(_g1h->gc_tracer_stw());
 913   } else {
 914     // Any garbage collection triggered as periodic collection resets the time-to-mixed
 915     // measurement. Periodic collection typically means that the application is "inactive", i.e.
  916     // the marking threads may have received an uncharacteristic amount of cpu time
 917     // for completing the marking, i.e. are faster than expected.
 918     // This skews the predicted marking length towards smaller values which might cause
 919     // the mark start being too late.
 920     _initial_mark_to_mixed.reset();
 921   }
 922 
 923   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
 924   double scan_logged_cards_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
 925 
 926   if (scan_logged_cards_time_goal_ms < merge_hcc_time_ms) {
 927     log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal)."
 928                                 "Logged Cards Scan time goal: %1.2fms Scan HCC time: %1.2fms",
 929                                 scan_logged_cards_time_goal_ms, merge_hcc_time_ms);


 939                               scan_logged_cards_time_goal_ms, logged_cards_time, merge_hcc_time_ms);
 940 
 941   _g1h->concurrent_refine()->adjust(logged_cards_time,
 942                                     phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards),
 943                                     scan_logged_cards_time_goal_ms);
 944 }
 945 
 946 G1IHOPControl* G1Policy::create_ihop_control(const G1Predictions* predictor){
 947   if (G1UseAdaptiveIHOP) {
 948     return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
 949                                      predictor,
 950                                      G1ReservePercent,
 951                                      G1HeapWastePercent);
 952   } else {
 953     return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent);
 954   }
 955 }
 956 
 957 void G1Policy::update_ihop_prediction(double mutator_time_s,
 958                                       size_t mutator_alloc_bytes,
 959                                       size_t young_gen_size,
 960                                       bool this_gc_was_young_only) {
 961   // Always try to update IHOP prediction. Even evacuation failures give information
 962   // about e.g. whether to start IHOP earlier next time.
 963 
 964   // Avoid using really small application times that might create samples with
 965   // very high or very low values. They may be caused by e.g. back-to-back gcs.
 966   double const min_valid_time = 1e-6;
 967 
 968   bool report = false;
 969 
 970   double marking_to_mixed_time = -1.0;
 971   if (!this_gc_was_young_only && _initial_mark_to_mixed.has_result()) {
 972     marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time();
 973     assert(marking_to_mixed_time > 0.0,
 974            "Initial mark to mixed time must be larger than zero but is %.3f",
 975            marking_to_mixed_time);
 976     if (marking_to_mixed_time > min_valid_time) {
 977       _ihop_control->update_marking_length(marking_to_mixed_time);
 978       report = true;
 979     }
 980   }
 981 
 982   // As an approximation for the young gc promotion rates during marking we use
 983   // all of them. In many applications there are only a few if any young gcs during
 984   // marking, which makes any prediction useless. This increases the accuracy of the
 985   // prediction.
 986   if (this_gc_was_young_only && mutator_time_s > min_valid_time) {





 987     _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size);
 988     report = true;
 989   }
 990 
 991   if (report) {
 992     report_ihop_statistics();
 993   }
 994 }
 995 
 996 void G1Policy::report_ihop_statistics() {
 997   _ihop_control->print();
 998 }
 999 
1000 void G1Policy::print_phases() {
1001   phase_times()->print();
1002 }
1003 
1004 double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards,
1005                                               size_t rs_length) const {
1006   size_t effective_scanned_cards = _analytics->predict_scan_card_num(rs_length, collector_state()->in_young_only_phase());


1073 
1074 bool G1Policy::can_expand_young_list() const {
1075   uint young_list_length = _g1h->young_regions_count();
1076   uint young_list_max_length = _young_list_max_length;
1077   return young_list_length < young_list_max_length;
1078 }
1079 
1080 bool G1Policy::use_adaptive_young_list_length() const {
1081   return _young_gen_sizer->use_adaptive_young_list_length();
1082 }
1083 
1084 size_t G1Policy::desired_survivor_size(uint max_regions) const {
1085   size_t const survivor_capacity = HeapRegion::GrainWords * max_regions;
1086   return (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);
1087 }
1088 
1089 void G1Policy::print_age_table() {
1090   _survivors_age_table.print_age_table(_tenuring_threshold);
1091 }
1092 
1093 void G1Policy::update_max_gc_locker_expansion() {
1094   uint expansion_region_num = 0;
1095   if (GCLockerEdenExpansionPercent > 0) {
1096     double perc = (double) GCLockerEdenExpansionPercent / 100.0;
1097     double expansion_region_num_d = perc * (double) _young_list_target_length;
1098     // We use ceiling so that if expansion_region_num_d is > 0.0 (but
1099     // less than 1.0) we'll get 1.
1100     expansion_region_num = (uint) ceil(expansion_region_num_d);
1101   } else {
1102     assert(expansion_region_num == 0, "sanity");
1103   }
1104   _young_list_max_length = _young_list_target_length + expansion_region_num;
1105   assert(_young_list_target_length <= _young_list_max_length, "post-condition");

1106 }
1107 
1108 // Calculates survivor space parameters.
1109 void G1Policy::update_survivors_policy() {
1110   double max_survivor_regions_d =
1111                  (double) _young_list_target_length / (double) SurvivorRatio;
1112 
1113   // Calculate desired survivor size based on desired max survivor regions (unconstrained
1114   // by remaining heap). Otherwise we may cause undesired promotions as we are
1115   // already getting close to end of the heap, impacting performance even more.
1116   uint const desired_max_survivor_regions = ceil(max_survivor_regions_d);
1117   size_t const survivor_size = desired_survivor_size(desired_max_survivor_regions);
1118 
1119   _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(survivor_size);
1120   if (UsePerfData) {
1121     _policy_counters->tenuring_threshold()->set_value(_tenuring_threshold);
1122     _policy_counters->desired_survivor_size()->set_value(survivor_size * oopSize);
1123   }
1124   // The real maximum survivor size is bounded by the number of regions that can
1125   // be allocated into.




  45 #include "gc/shared/concurrentGCBreakpoints.hpp"
  46 #include "gc/shared/gcPolicyCounters.hpp"
  47 #include "logging/log.hpp"
  48 #include "runtime/arguments.hpp"
  49 #include "runtime/globals.hpp"
  50 #include "runtime/java.hpp"
  51 #include "runtime/mutexLocker.hpp"
  52 #include "utilities/debug.hpp"
  53 #include "utilities/growableArray.hpp"
  54 #include "utilities/pair.hpp"
  55 
  56 G1Policy::G1Policy(STWGCTimer* gc_timer) :
  57   _predictor(G1ConfidencePercent / 100.0),
  58   _analytics(new G1Analytics(&_predictor)),
  59   _remset_tracker(),
  60   _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
  61   _ihop_control(create_ihop_control(&_predictor)),
  62   _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
  63   _full_collection_start_sec(0.0),
  64   _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC),
  65   _young_list_desired_length(0),
  66   _young_list_target_length(0),
  67   _young_list_max_length(0),
  68   _eden_surv_rate_group(new G1SurvRateGroup()),
  69   _survivor_surv_rate_group(new G1SurvRateGroup()),
  70   _reserve_factor((double) G1ReservePercent / 100.0),
  71   _reserve_regions(0),
  72   _young_gen_sizer(G1YoungGenSizer::create_gen_sizer()),
  73   _free_regions_at_end_of_collection(0),
  74   _rs_length(0),
  75   _rs_length_prediction(0),
  76   _pending_cards_at_gc_start(0),
  77   _old_gen_alloc_tracker(),
  78   _initial_mark_to_mixed(),
  79   _collection_set(NULL),
  80   _g1h(NULL),
  81   _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
  82   _mark_remark_start_sec(0),
  83   _mark_cleanup_start_sec(0),
  84   _tenuring_threshold(MaxTenuringThreshold),
  85   _max_survivor_regions(0),


  95 G1Policy* G1Policy::create_policy(STWGCTimer* gc_timer_stw) {
  96   if (G1Arguments::is_heterogeneous_heap()) {
  97     return new G1HeterogeneousHeapPolicy(gc_timer_stw);
  98   } else {
  99     return new G1Policy(gc_timer_stw);
 100   }
 101 }
 102 
 103 G1CollectorState* G1Policy::collector_state() const { return _g1h->collector_state(); }
 104 
 105 void G1Policy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {
 106   _g1h = g1h;
 107   _collection_set = collection_set;
 108 
 109   assert(Heap_lock->owned_by_self(), "Locking discipline.");
 110 
 111   _young_gen_sizer->adjust_max_new_size(_g1h->max_expandable_regions());
 112 
 113   _free_regions_at_end_of_collection = _g1h->num_free_regions();
 114 
 115   update_young_length_bounds();
 116   // We may immediately start allocating regions and placing them on the
 117   // collection set list. Initialize the per-collection set info
 118   _collection_set->start_incremental_building();
 119 }
 120 
 121 void G1Policy::note_gc_start() {
 122   phase_times()->note_gc_start();
 123 }
 124 
 125 class G1YoungLengthPredictor {
 126   const double _base_time_ms;
 127   const double _base_free_regions;
 128   const double _target_pause_time_ms;
 129   const G1Policy* const _policy;
 130 
 131  public:
 132   G1YoungLengthPredictor(double base_time_ms,
 133                          double base_free_regions,
 134                          double target_pause_time_ms,
 135                          const G1Policy* policy) :


 184 
 185   _young_gen_sizer->heap_size_changed(new_number_of_regions);
 186 
 187   _ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes);
 188 }
 189 
 190 uint G1Policy::calculate_desired_eden_length_by_mmu() const {
 191   // One could argue that any useful eden length to keep any MMU would be 1, but
 192   // in theory this is possible. Other constraints enforce a minimum eden of 1
 193   // anyway.
 194   uint desired_min_length = 0;
 195   if (use_adaptive_young_list_length()) {
 196     double now_sec = os::elapsedTime();
 197     double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
 198     double alloc_rate_ms = _analytics->predict_alloc_rate_ms();
 199     desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
 200   }
 201   return desired_min_length;
 202 }
 203 
 204 void G1Policy::update_young_length_bounds() {
 205   update_young_length_bounds(_analytics->predict_rs_length());
 206 }
 207 
 208 void G1Policy::update_young_length_bounds(size_t rs_length) {
 209   _young_list_desired_length = calculate_young_desired_length(rs_length);
 210   _young_list_target_length = calculate_young_target_length(_young_list_desired_length);
 211   _young_list_max_length = calculate_young_max_length(_young_list_target_length);
 212 
 213   log_debug(gc,ergo,heap)("Young list lengths: desired: %u, target: %u, max: %u",
 214                           _young_list_desired_length,
 215                           _young_list_target_length,
 216                           _young_list_max_length);




 217 }
 218 
 219 // Calculates desired young gen length. It is calculated from:
 220 //
 221 // - sizer min/max bounds on young gen
 222 // - pause time goal for whole young gen evacuation
 223 // - MMU goal influencing eden to make GCs spaced apart.
 224 // - a minimum one eden region length.
 225 //
 226 // We may enter with already allocated eden and survivor regions, that may be
 227 // higher than the maximum, or the above goals may result in a desired value
 228 // smaller than are already allocated.
  229 // The main reason is revising young length, with or without the GCLocker being
 230 // active.
 231 //
 232 uint G1Policy::calculate_young_desired_length(size_t rs_length) const {
 233   uint min_young_length_by_sizer = _young_gen_sizer->min_desired_young_length();
 234   uint max_young_length_by_sizer = _young_gen_sizer->max_desired_young_length();
 235 
 236   assert(min_young_length_by_sizer >= 1, "invariant");
 237   assert(max_young_length_by_sizer >= min_young_length_by_sizer, "invariant");
 238 
 239   // Absolute minimum eden length.
 240   // Enforcing a minimum eden length helps at startup when the predictors are not
 241   // yet trained on the application to avoid unnecessary (but very short) full gcs
 242   // on very small (initial) heaps.
 243   uint const MinDesiredEdenLength = 1;
 244 
 245   // Calculate the absolute and desired min bounds first.
 246 
 247   // This is how many survivor regions we already have.
 248   const uint survivor_length = _g1h->survivor_regions_count();
 249   // Size of the already allocated young gen.
 250   const uint allocated_young_length = _g1h->young_regions_count();
 251   // This is the absolute minimum young length that we can return. Ensure that we
 252   // don't go below any user-defined minimum bound; but we might have already
 253   // allocated more than that for reasons. In this case, use that.
 254   uint absolute_min_young_length = MAX2(allocated_young_length, min_young_length_by_sizer);
 255   // Calculate the absolute max bounds. After evac failure or when revising the
 256   // young length we might have exceeded absolute min length or absolute_max_length,
 257   // so adjust the result accordingly.
 258   uint absolute_max_young_length = MAX2(max_young_length_by_sizer, absolute_min_young_length);
 259 


 504 }
 505 
 506 double G1Policy::predict_survivor_regions_evac_time() const {
 507   double survivor_regions_evac_time = 0.0;
 508   const GrowableArray<HeapRegion*>* survivor_regions = _g1h->survivor()->regions();
 509   for (GrowableArrayIterator<HeapRegion*> it = survivor_regions->begin();
 510        it != survivor_regions->end();
 511        ++it) {
 512     survivor_regions_evac_time += predict_region_total_time_ms(*it, collector_state()->in_young_only_phase());
 513   }
 514   return survivor_regions_evac_time;
 515 }
 516 
 517 void G1Policy::revise_young_list_target_length_if_necessary(size_t rs_length) {
 518   guarantee(use_adaptive_young_list_length(), "should not call this otherwise" );
 519 
 520   if (rs_length > _rs_length_prediction) {
 521     // add 10% to avoid having to recalculate often
 522     size_t rs_length_prediction = rs_length * 1100 / 1000;
 523     update_rs_length_prediction(rs_length_prediction);
 524     update_young_length_bounds(rs_length_prediction);

 525   }
 526 }
 527 
 528 void G1Policy::update_rs_length_prediction() {
 529   update_rs_length_prediction(_analytics->predict_rs_length());
 530 }
 531 
 532 void G1Policy::update_rs_length_prediction(size_t prediction) {
 533   if (collector_state()->in_young_only_phase() && use_adaptive_young_list_length()) {
 534     _rs_length_prediction = prediction;
 535   }
 536 }
 537 
 538 void G1Policy::record_full_collection_start() {
 539   _full_collection_start_sec = os::elapsedTime();
 540   // Release the future to-space so that it is available for compaction into.
 541   collector_state()->set_in_young_only_phase(false);
 542   collector_state()->set_in_full_gc(true);
 543   _collection_set->clear_candidates();
 544   _pending_cards_at_gc_start = 0;


 552   double full_gc_time_ms = full_gc_time_sec * 1000.0;
 553 
 554   _analytics->update_recent_gc_times(end_sec, full_gc_time_ms);
 555 
 556   collector_state()->set_in_full_gc(false);
 557 
 558   // "Nuke" the heuristics that control the young/mixed GC
 559   // transitions and make sure we start with young GCs after the Full GC.
 560   collector_state()->set_in_young_only_phase(true);
 561   collector_state()->set_in_young_gc_before_mixed(false);
 562   collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
 563   collector_state()->set_in_initial_mark_gc(false);
 564   collector_state()->set_mark_or_rebuild_in_progress(false);
 565   collector_state()->set_clearing_next_bitmap(false);
 566 
 567   _eden_surv_rate_group->start_adding_regions();
 568   // also call this on any additional surv rate groups
 569 
 570   _free_regions_at_end_of_collection = _g1h->num_free_regions();
 571   _survivor_surv_rate_group->reset();
 572   update_young_length_bounds();
 573   update_rs_length_prediction();
 574 
 575   _old_gen_alloc_tracker.reset_after_full_gc();
 576 
 577   record_pause(FullGC, _full_collection_start_sec, end_sec);
 578 }
 579 
 580 static void log_refinement_stats(const char* kind, const G1ConcurrentRefineStats& stats) {
 581   log_debug(gc, refine, stats)
 582            ("%s refinement: %.2fms, refined: " SIZE_FORMAT
 583             ", precleaned: " SIZE_FORMAT ", dirtied: " SIZE_FORMAT,
 584             kind,
 585             stats.refinement_time().seconds() * MILLIUNITS,
 586             stats.refined_cards(),
 587             stats.precleaned_cards(),
 588             stats.dirtied_cards());
 589 }
 590 
 591 void G1Policy::record_concurrent_refinement_stats() {
 592   G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();


 882     // During mixed gc we do not use them for young gen sizing.
 883     if (this_pause_was_young_only) {
 884       _analytics->report_pending_cards((double) _pending_cards_at_gc_start);
 885       _analytics->report_rs_length((double) _rs_length);
 886     }
 887   }
 888 
 889   assert(!(this_pause_included_initial_mark && collector_state()->mark_or_rebuild_in_progress()),
 890          "If the last pause has been an initial mark, we should not have been in the marking window");
 891   if (this_pause_included_initial_mark) {
 892     collector_state()->set_mark_or_rebuild_in_progress(true);
 893   }
 894 
 895   _free_regions_at_end_of_collection = _g1h->num_free_regions();
 896 
 897   update_rs_length_prediction();
 898 
 899   // Do not update dynamic IHOP due to G1 periodic collection as it is highly likely
 900   // that in this case we are not running in a "normal" operating mode.
 901   if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
 902     update_young_length_bounds();




 903 
 904     _old_gen_alloc_tracker.reset_after_young_gc(app_time_ms / 1000.0);
 905     update_ihop_prediction(_old_gen_alloc_tracker.last_cycle_duration(),
 906                            _old_gen_alloc_tracker.last_cycle_old_bytes(),

 907                            this_pause_was_young_only);
 908 
 909     _ihop_control->send_trace_event(_g1h->gc_tracer_stw());
 910   } else {
 911     // Any garbage collection triggered as periodic collection resets the time-to-mixed
 912     // measurement. Periodic collection typically means that the application is "inactive", i.e.
  913     // the marking threads may have received an uncharacteristic amount of cpu time
 914     // for completing the marking, i.e. are faster than expected.
 915     // This skews the predicted marking length towards smaller values which might cause
 916     // the mark start being too late.
 917     _initial_mark_to_mixed.reset();
 918   }
 919 
 920   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
 921   double scan_logged_cards_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
 922 
 923   if (scan_logged_cards_time_goal_ms < merge_hcc_time_ms) {
 924     log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal)."
 925                                 "Logged Cards Scan time goal: %1.2fms Scan HCC time: %1.2fms",
 926                                 scan_logged_cards_time_goal_ms, merge_hcc_time_ms);


 936                               scan_logged_cards_time_goal_ms, logged_cards_time, merge_hcc_time_ms);
 937 
 938   _g1h->concurrent_refine()->adjust(logged_cards_time,
 939                                     phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards),
 940                                     scan_logged_cards_time_goal_ms);
 941 }
 942 
 943 G1IHOPControl* G1Policy::create_ihop_control(const G1Predictions* predictor){
 944   if (G1UseAdaptiveIHOP) {
 945     return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
 946                                      predictor,
 947                                      G1ReservePercent,
 948                                      G1HeapWastePercent);
 949   } else {
 950     return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent);
 951   }
 952 }
 953 
 954 void G1Policy::update_ihop_prediction(double mutator_time_s,
 955                                       size_t mutator_alloc_bytes,

 956                                       bool this_gc_was_young_only) {
 957   // Always try to update IHOP prediction. Even evacuation failures give information
 958   // about e.g. whether to start IHOP earlier next time.
 959 
 960   // Avoid using really small application times that might create samples with
 961   // very high or very low values. They may be caused by e.g. back-to-back gcs.
 962   double const min_valid_time = 1e-6;
 963 
 964   bool report = false;
 965 
 966   double marking_to_mixed_time = -1.0;
 967   if (!this_gc_was_young_only && _initial_mark_to_mixed.has_result()) {
 968     marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time();
 969     assert(marking_to_mixed_time > 0.0,
 970            "Initial mark to mixed time must be larger than zero but is %.3f",
 971            marking_to_mixed_time);
 972     if (marking_to_mixed_time > min_valid_time) {
 973       _ihop_control->update_marking_length(marking_to_mixed_time);
 974       report = true;
 975     }
 976   }
 977 
 978   // As an approximation for the young gc promotion rates during marking we use
 979   // all of them. In many applications there are only a few if any young gcs during
 980   // marking, which makes any prediction useless. This increases the accuracy of the
 981   // prediction.
 982   if (this_gc_was_young_only && mutator_time_s > min_valid_time) {
 983     // IHOP control wants to know the expected young gen length if it were not
 984     // restrained by the heap reserve. Using the actual length would make the
 985     // prediction too small and the limit the young gen every time we get to the
 986     // predicted target occupancy.
 987     uint young_gen_size = young_list_desired_length() * HeapRegion::GrainBytes;
 988     _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size);
 989     report = true;
 990   }
 991 
 992   if (report) {
 993     report_ihop_statistics();
 994   }
 995 }
 996 
// Log the current statistics of the IHOP control.
void G1Policy::report_ihop_statistics() {
  _ihop_control->print();
}
1000 
// Print the per-phase timing data (G1GCPhaseTimes) gathered for the last pause.
void G1Policy::print_phases() {
  phase_times()->print();
}
1004 
1005 double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards,
1006                                               size_t rs_length) const {
1007   size_t effective_scanned_cards = _analytics->predict_scan_card_num(rs_length, collector_state()->in_young_only_phase());


1074 
1075 bool G1Policy::can_expand_young_list() const {
1076   uint young_list_length = _g1h->young_regions_count();
1077   uint young_list_max_length = _young_list_max_length;
1078   return young_list_length < young_list_max_length;
1079 }
1080 
// Whether the young list length may be sized adaptively; delegates to the
// young gen sizer.
bool G1Policy::use_adaptive_young_list_length() const {
  return _young_gen_sizer->use_adaptive_young_list_length();
}
1084 
1085 size_t G1Policy::desired_survivor_size(uint max_regions) const {
1086   size_t const survivor_capacity = HeapRegion::GrainWords * max_regions;
1087   return (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);
1088 }
1089 
// Print the survivor age table, using the current tenuring threshold.
void G1Policy::print_age_table() {
  _survivors_age_table.print_age_table(_tenuring_threshold);
}
1093 
1094 uint G1Policy::calculate_young_max_length(uint target_young_length) const {
1095   uint expansion_region_num = 0;
1096   if (GCLockerEdenExpansionPercent > 0) {
1097     double perc = (double) GCLockerEdenExpansionPercent / 100.0;
1098     double expansion_region_num_d = perc * (double) _young_list_target_length;
1099     // We use ceiling so that if expansion_region_num_d is > 0.0 (but
1100     // less than 1.0) we'll get 1.
1101     expansion_region_num = (uint) ceil(expansion_region_num_d);
1102   } else {
1103     assert(expansion_region_num == 0, "sanity");
1104   }
1105   uint max_length = target_young_length + expansion_region_num;
1106   assert(target_young_length <= max_length, "post-condition");
1107   return max_length;
1108 }
1109 
1110 // Calculates survivor space parameters.
1111 void G1Policy::update_survivors_policy() {
1112   double max_survivor_regions_d =
1113                  (double) _young_list_target_length / (double) SurvivorRatio;
1114 
1115   // Calculate desired survivor size based on desired max survivor regions (unconstrained
1116   // by remaining heap). Otherwise we may cause undesired promotions as we are
1117   // already getting close to end of the heap, impacting performance even more.
1118   uint const desired_max_survivor_regions = ceil(max_survivor_regions_d);
1119   size_t const survivor_size = desired_survivor_size(desired_max_survivor_regions);
1120 
1121   _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(survivor_size);
1122   if (UsePerfData) {
1123     _policy_counters->tenuring_threshold()->set_value(_tenuring_threshold);
1124     _policy_counters->desired_survivor_size()->set_value(survivor_size * oopSize);
1125   }
1126   // The real maximum survivor size is bounded by the number of regions that can
1127   // be allocated into.


< prev index next >