< prev index next >

src/hotspot/share/gc/g1/g1Policy.cpp

Print this page
rev 60300 : 8249676: [REDO] G1 incorrectly limiting young gen size when using the reserve can result in repeated full gcs
Summary: Reapply JDK-8244603 after fixing JDK-8249192.
Reviewed-by:


  29 #include "gc/g1/g1CollectionSet.hpp"
  30 #include "gc/g1/g1CollectionSetCandidates.hpp"
  31 #include "gc/g1/g1ConcurrentMark.hpp"
  32 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
  33 #include "gc/g1/g1ConcurrentRefine.hpp"
  34 #include "gc/g1/g1ConcurrentRefineStats.hpp"
  35 #include "gc/g1/g1CollectionSetChooser.hpp"
  36 #include "gc/g1/g1HeterogeneousHeapPolicy.hpp"
  37 #include "gc/g1/g1HotCardCache.hpp"
  38 #include "gc/g1/g1IHOPControl.hpp"
  39 #include "gc/g1/g1GCPhaseTimes.hpp"
  40 #include "gc/g1/g1Policy.hpp"
  41 #include "gc/g1/g1SurvivorRegions.hpp"
  42 #include "gc/g1/g1YoungGenSizer.hpp"
  43 #include "gc/g1/heapRegion.inline.hpp"
  44 #include "gc/g1/heapRegionRemSet.hpp"
  45 #include "gc/shared/concurrentGCBreakpoints.hpp"
  46 #include "gc/shared/gcPolicyCounters.hpp"
  47 #include "logging/log.hpp"
  48 #include "runtime/arguments.hpp"

  49 #include "runtime/java.hpp"
  50 #include "runtime/mutexLocker.hpp"
  51 #include "utilities/debug.hpp"
  52 #include "utilities/growableArray.hpp"
  53 #include "utilities/pair.hpp"
  54 
  55 G1Policy::G1Policy(STWGCTimer* gc_timer) :
  56   _predictor(G1ConfidencePercent / 100.0),
  57   _analytics(new G1Analytics(&_predictor)),
  58   _remset_tracker(),
  59   _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
  60   _ihop_control(create_ihop_control(&_predictor)),
  61   _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
  62   _full_collection_start_sec(0.0),
  63   _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC),

  64   _young_list_target_length(0),
  65   _young_list_fixed_length(0),
  66   _young_list_max_length(0),
  67   _eden_surv_rate_group(new G1SurvRateGroup()),
  68   _survivor_surv_rate_group(new G1SurvRateGroup()),
  69   _reserve_factor((double) G1ReservePercent / 100.0),
  70   _reserve_regions(0),
  71   _young_gen_sizer(G1YoungGenSizer::create_gen_sizer()),
  72   _free_regions_at_end_of_collection(0),
  73   _rs_length(0),
  74   _rs_length_prediction(0),
  75   _pending_cards_at_gc_start(0),
  76   _old_gen_alloc_tracker(),
  77   _concurrent_start_to_mixed(),
  78   _collection_set(NULL),
  79   _g1h(NULL),
  80   _phase_times_timer(gc_timer),
  81   _phase_times(NULL),
  82   _mark_remark_start_sec(0),
  83   _mark_cleanup_start_sec(0),
  84   _tenuring_threshold(MaxTenuringThreshold),
  85   _max_survivor_regions(0),


  91   delete _ihop_control;
  92   delete _young_gen_sizer;
  93 }
  94 
  95 G1Policy* G1Policy::create_policy(STWGCTimer* gc_timer_stw) {
  96   if (G1Arguments::is_heterogeneous_heap()) {
  97     return new G1HeterogeneousHeapPolicy(gc_timer_stw);
  98   } else {
  99     return new G1Policy(gc_timer_stw);
 100   }
 101 }
 102 
 103 G1CollectorState* G1Policy::collector_state() const { return _g1h->collector_state(); }
 104 
// Second-phase initialization, called once the heap and collection set
// objects exist. Requires the Heap_lock (asserted below). Establishes the
// fixed young length (non-adaptive case), the initial young target/max
// lengths, and opens the incremental collection set for building.
 105 void G1Policy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {
 106   _g1h = g1h;
 107   _collection_set = collection_set;
 108 
 109   assert(Heap_lock->owned_by_self(), "Locking discipline.");
 110 
     // With a user-fixed young gen, pin the length to the sizer's minimum.
 111   if (!use_adaptive_young_list_length()) {
 112     _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
 113   }
     // Cap the sizer's maximum at what the heap can actually expand to.
 114   _young_gen_sizer->adjust_max_new_size(_g1h->max_expandable_regions());
 115 
 116   _free_regions_at_end_of_collection = _g1h->num_free_regions();
 117 
 118   update_young_list_max_and_target_length();
 119   // We may immediately start allocating regions and placing them on the
 120   // collection set list. Initialize the per-collection set info
 121   _collection_set->start_incremental_building();
 122 }
 123 
 124 void G1Policy::note_gc_start() {
 125   phase_times()->note_gc_start();
 126 }
 127 
 128 class G1YoungLengthPredictor {
 129   const double _base_time_ms;
 130   const double _base_free_regions;
 131   const double _target_pause_time_ms;
 132   const G1Policy* const _policy;
 133 
 134  public:
 135   G1YoungLengthPredictor(double base_time_ms,
 136                          double base_free_regions,
 137                          double target_pause_time_ms,
 138                          const G1Policy* policy) :


 173       return false;
 174     }
 175 
 176     // success!
 177     return true;
 178   }
 179 };
 180 
 181 void G1Policy::record_new_heap_size(uint new_number_of_regions) {
 182   // re-calculate the necessary reserve
 183   double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
 184   // We use ceiling so that if reserve_regions_d is > 0.0 (but
 185   // smaller than 1.0) we'll get 1.
 186   _reserve_regions = (uint) ceil(reserve_regions_d);
 187 
 188   _young_gen_sizer->heap_size_changed(new_number_of_regions);
 189 
 190   _ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes);
 191 }
 192 
 193 uint G1Policy::calculate_young_list_desired_min_length(uint base_min_length) const {



 194   uint desired_min_length = 0;
 195   if (use_adaptive_young_list_length()) {
 196     if (_analytics->num_alloc_rate_ms() > 3) {
 197       double now_sec = os::elapsedTime();
 198       double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
 199       double alloc_rate_ms = _analytics->predict_alloc_rate_ms();
 200       desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
 201     } else {
 202       // otherwise we don't have enough info to make the prediction
 203     }
 204   }
 205   desired_min_length += base_min_length;
 206   // make sure we don't go below any user-defined minimum bound
 207   return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
 208 }
 209 
 210 uint G1Policy::calculate_young_list_desired_max_length() const {
 211   // Here, we might want to also take into account any additional
 212   // constraints (i.e., user-defined minimum bound). Currently, we
 213   // effectively don't set this bound.
 214   return _young_gen_sizer->max_desired_young_length();
 215 }
 216 
 217 uint G1Policy::update_young_list_max_and_target_length() {
 218   return update_young_list_max_and_target_length(_analytics->predict_rs_length());
 219 }
 220 
 221 uint G1Policy::update_young_list_max_and_target_length(size_t rs_length) {
 222   uint unbounded_target_length = update_young_list_target_length(rs_length);
 223   update_max_gc_locker_expansion();
 224   return unbounded_target_length;
 225 }
 226 
 227 uint G1Policy::update_young_list_target_length(size_t rs_length) {
 228   YoungTargetLengths young_lengths = young_list_target_lengths(rs_length);
 229   _young_list_target_length = young_lengths.first;
 230 
 231   return young_lengths.second;
 232 }
 233 
 234 G1Policy::YoungTargetLengths G1Policy::young_list_target_lengths(size_t rs_length) const {
 235   YoungTargetLengths result;


































 236 
 237   // Calculate the absolute and desired min bounds first.
 238 
 239   // This is how many young regions we already have (currently: the survivors).
 240   const uint base_min_length = _g1h->survivor_regions_count();
 241   uint desired_min_length = calculate_young_list_desired_min_length(base_min_length);
 242   // This is the absolute minimum young length. Ensure that we
 243   // will at least have one eden region available for allocation.
 244   uint absolute_min_length = base_min_length + MAX2(_g1h->eden_regions_count(), (uint)1);
 245   // If we shrank the young list target it should not shrink below the current size.
 246   desired_min_length = MAX2(desired_min_length, absolute_min_length);
 247   // Calculate the absolute and desired max bounds.
 248 
 249   uint desired_max_length = calculate_young_list_desired_max_length();





 250 
 251   uint young_list_target_length = 0;
 252   if (use_adaptive_young_list_length()) {
 253     if (collector_state()->in_young_only_phase()) {
 254       young_list_target_length =
 255                         calculate_young_list_target_length(rs_length,
 256                                                            base_min_length,
 257                                                            desired_min_length,
 258                                                            desired_max_length);




 259     } else {
 260       // Don't calculate anything and let the code below bound it to
 261       // the desired_min_length, i.e., do the next GC as soon as
 262       // possible to maximize how many old regions we can add to it.
 263     }














 264   } else {
 265     // The user asked for a fixed young gen so we'll fix the young gen
 266     // whether the next GC is young or mixed.
 267     young_list_target_length = _young_list_fixed_length;
 268   }


 269 
 270   result.second = young_list_target_length;
 271 
 272   // We will try our best not to "eat" into the reserve.
 273   uint absolute_max_length = 0;
 274   if (_free_regions_at_end_of_collection > _reserve_regions) {
 275     absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;

























































































 276   }
 277   if (desired_max_length > absolute_max_length) {
 278     desired_max_length = absolute_max_length;
 279   }
 280 
 281   // Make sure we don't go over the desired max length, nor under the
 282   // desired min length. In case they clash, desired_min_length wins
 283   // which is why that test is second.
 284   if (young_list_target_length > desired_max_length) {
 285     young_list_target_length = desired_max_length;
 286   }
 287   if (young_list_target_length < desired_min_length) {
 288     young_list_target_length = desired_min_length;
 289   }
 290 
 291   assert(young_list_target_length > base_min_length,
 292          "we should be able to allocate at least one eden region");
 293   assert(young_list_target_length >= absolute_min_length, "post-condition");
 294 
 295   result.first = young_list_target_length;
 296   return result;





 297 }
 298 
 299 uint G1Policy::calculate_young_list_target_length(size_t rs_length,
 300                                                   uint base_min_length,
 301                                                   uint desired_min_length,
 302                                                   uint desired_max_length) const {
 303   assert(use_adaptive_young_list_length(), "pre-condition");
 304   assert(collector_state()->in_young_only_phase(), "only call this for young GCs");
 305 
 306   // In case some edge-condition makes the desired max length too small...
 307   if (desired_max_length <= desired_min_length) {
 308     return desired_min_length;
 309   }
 310 
 311   // We'll adjust min_young_length and max_young_length not to include
 312   // the already allocated young regions (i.e., so they reflect the
 313   // min and max eden regions we'll allocate). The base_min_length
 314   // will be reflected in the predictions by the
 315   // survivor_regions_evac_time prediction.
 316   assert(desired_min_length > base_min_length, "invariant");
 317   uint min_young_length = desired_min_length - base_min_length;
 318   assert(desired_max_length > base_min_length, "invariant");
 319   uint max_young_length = desired_max_length - base_min_length;
 320 
 321   const double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
 322   const size_t pending_cards = _analytics->predict_pending_cards();
 323   const double base_time_ms = predict_base_elapsed_time_ms(pending_cards, rs_length);
 324   const uint available_free_regions = _free_regions_at_end_of_collection;
 325   const uint base_free_regions =
 326     available_free_regions > _reserve_regions ? available_free_regions - _reserve_regions : 0;
 327 
 328   // Here, we will make sure that the shortest young length that
 329   // makes sense fits within the target pause time.
 330 
 331   G1YoungLengthPredictor p(base_time_ms,
 332                            base_free_regions,
 333                            target_pause_time_ms,
 334                            this);
 335   if (p.will_fit(min_young_length)) {
 336     // The shortest young length will fit into the target pause time;
 337     // we'll now check whether the absolute maximum number of young
 338     // regions will fit in the target pause time. If not, we'll do
 339     // a binary search between min_young_length and max_young_length.
 340     if (p.will_fit(max_young_length)) {
 341       // The maximum young length will fit into the target pause time.
 342       // We are done so set min young length to the maximum length (as
 343       // the result is assumed to be returned in min_young_length).
 344       min_young_length = max_young_length;
 345     } else {
 346       // The maximum possible number of young regions will not fit within
 347       // the target pause time so we'll search for the optimal
 348       // length. The loop invariants are:
 349       //
 350       // min_young_length < max_young_length
 351       // min_young_length is known to fit into the target pause time
 352       // max_young_length is known not to fit into the target pause time
 353       //
 354       // Going into the loop we know the above hold as we've just
 355       // checked them. Every time around the loop we check whether
 356       // the middle value between min_young_length and
 357       // max_young_length fits into the target pause time. If it
 358       // does, it becomes the new min. If it doesn't, it becomes
 359       // the new max. This way we maintain the loop invariants.
 360 
 361       assert(min_young_length < max_young_length, "invariant");
 362       uint diff = (max_young_length - min_young_length) / 2;
 363       while (diff > 0) {
 364         uint young_length = min_young_length + diff;
 365         if (p.will_fit(young_length)) {
 366           min_young_length = young_length;
 367         } else {
 368           max_young_length = young_length;
 369         }
 370         assert(min_young_length <  max_young_length, "invariant");
 371         diff = (max_young_length - min_young_length) / 2;
 372       }
 373       // The results is min_young_length which, according to the
 374       // loop invariants, should fit within the target pause time.
 375 
 376       // These are the post-conditions of the binary search above:
 377       assert(min_young_length < max_young_length,
 378              "otherwise we should have discovered that max_young_length "
 379              "fits into the pause target and not done the binary search");
 380       assert(p.will_fit(min_young_length),
 381              "min_young_length, the result of the binary search, should "
 382              "fit into the pause target");
 383       assert(!p.will_fit(min_young_length + 1),
 384              "min_young_length, the result of the binary search, should be "
 385              "optimal, so no larger length should fit into the pause target");
 386     }
 387   } else {
 388     // Even the minimum length doesn't fit into the pause time
 389     // target, return it as the result nevertheless.
 390   }
 391   return base_min_length + min_young_length;



















 392 }
 393 
 394 double G1Policy::predict_survivor_regions_evac_time() const {
 395   double survivor_regions_evac_time = 0.0;
 396   const GrowableArray<HeapRegion*>* survivor_regions = _g1h->survivor()->regions();
 397   for (GrowableArrayIterator<HeapRegion*> it = survivor_regions->begin();
 398        it != survivor_regions->end();
 399        ++it) {
 400     survivor_regions_evac_time += predict_region_total_time_ms(*it, collector_state()->in_young_only_phase());
 401   }
 402   return survivor_regions_evac_time;
 403 }
 404 
 405 G1GCPhaseTimes* G1Policy::phase_times() const {
 406   // Lazy allocation because it must follow initialization of all the
 407   // OopStorage objects by various other subsystems.
 408   if (_phase_times == NULL) {
 409     _phase_times = new G1GCPhaseTimes(_phase_times_timer, ParallelGCThreads);
 410   }
 411   return _phase_times;
 412 }
 413 
 414 void G1Policy::revise_young_list_target_length_if_necessary(size_t rs_length) {
 415   guarantee(use_adaptive_young_list_length(), "should not call this otherwise" );
 416 
 417   if (rs_length > _rs_length_prediction) {
 418     // add 10% to avoid having to recalculate often
 419     size_t rs_length_prediction = rs_length * 1100 / 1000;
 420     update_rs_length_prediction(rs_length_prediction);
 421 
 422     update_young_list_max_and_target_length(rs_length_prediction);
 423   }
 424 }
 425 
 426 void G1Policy::update_rs_length_prediction() {
 427   update_rs_length_prediction(_analytics->predict_rs_length());
 428 }
 429 
 430 void G1Policy::update_rs_length_prediction(size_t prediction) {
 431   if (collector_state()->in_young_only_phase() && use_adaptive_young_list_length()) {
 432     _rs_length_prediction = prediction;
 433   }
 434 }
 435 
 436 void G1Policy::record_full_collection_start() {
 437   _full_collection_start_sec = os::elapsedTime();
 438   // Release the future to-space so that it is available for compaction into.
 439   collector_state()->set_in_young_only_phase(false);
 440   collector_state()->set_in_full_gc(true);
 441   _collection_set->clear_candidates();
 442   _pending_cards_at_gc_start = 0;


 450   double full_gc_time_ms = full_gc_time_sec * 1000.0;
 451 
 452   _analytics->update_recent_gc_times(end_sec, full_gc_time_ms);
 453 
 454   collector_state()->set_in_full_gc(false);
 455 
 456   // "Nuke" the heuristics that control the young/mixed GC
 457   // transitions and make sure we start with young GCs after the Full GC.
 458   collector_state()->set_in_young_only_phase(true);
 459   collector_state()->set_in_young_gc_before_mixed(false);
 460   collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
 461   collector_state()->set_in_concurrent_start_gc(false);
 462   collector_state()->set_mark_or_rebuild_in_progress(false);
 463   collector_state()->set_clearing_next_bitmap(false);
 464 
 465   _eden_surv_rate_group->start_adding_regions();
 466   // also call this on any additional surv rate groups
 467 
 468   _free_regions_at_end_of_collection = _g1h->num_free_regions();
 469   _survivor_surv_rate_group->reset();
 470   update_young_list_max_and_target_length();
 471   update_rs_length_prediction();
 472 
 473   _old_gen_alloc_tracker.reset_after_full_gc();
 474 
 475   record_pause(FullGC, _full_collection_start_sec, end_sec);
 476 }
 477 
// Emit a gc+refine+stats debug-log line summarizing one source of card
// refinement work; "kind" names the source in the message.
 478 static void log_refinement_stats(const char* kind, const G1ConcurrentRefineStats& stats) {
 479   log_debug(gc, refine, stats)
 480            ("%s refinement: %.2fms, refined: " SIZE_FORMAT
 481             ", precleaned: " SIZE_FORMAT ", dirtied: " SIZE_FORMAT,
 482             kind,
 483             stats.refinement_time().seconds() * MILLIUNITS,
 484             stats.refined_cards(),
 485             stats.precleaned_cards(),
 486             stats.dirtied_cards());
 487 }
 488 
 489 void G1Policy::record_concurrent_refinement_stats() {
 490   G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();


 784     // During mixed gc we do not use them for young gen sizing.
 785     if (is_young_only_pause(this_pause)) {
 786       _analytics->report_pending_cards((double) _pending_cards_at_gc_start);
 787       _analytics->report_rs_length((double) _rs_length);
 788     }
 789   }
 790 
 791   assert(!(is_concurrent_start_pause(this_pause) && collector_state()->mark_or_rebuild_in_progress()),
 792          "If the last pause has been concurrent start, we should not have been in the marking window");
 793   if (is_concurrent_start_pause(this_pause)) {
 794     collector_state()->set_mark_or_rebuild_in_progress(true);
 795   }
 796 
 797   _free_regions_at_end_of_collection = _g1h->num_free_regions();
 798 
 799   update_rs_length_prediction();
 800 
 801   // Do not update dynamic IHOP due to G1 periodic collection as it is highly likely
 802   // that in this case we are not running in a "normal" operating mode.
 803   if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
 804     // IHOP control wants to know the expected young gen length if it were not
 805     // restrained by the heap reserve. Using the actual length would make the
 806     // prediction too small and thus limit the young gen every time we get to the
 807     // predicted target occupancy.
 808     size_t last_unrestrained_young_length = update_young_list_max_and_target_length();
 809 
 810     _old_gen_alloc_tracker.reset_after_young_gc(app_time_ms / 1000.0);
 811     update_ihop_prediction(_old_gen_alloc_tracker.last_cycle_duration(),
 812                            _old_gen_alloc_tracker.last_cycle_old_bytes(),
 813                            last_unrestrained_young_length * HeapRegion::GrainBytes,
 814                            is_young_only_pause(this_pause));
 815 
 816     _ihop_control->send_trace_event(_g1h->gc_tracer_stw());
 817   } else {
 818     // Any garbage collection triggered as periodic collection resets the time-to-mixed
 819     // measurement. Periodic collection typically means that the application is "inactive", i.e.
 820     // the marking threads may have received an uncharacteristic amount of cpu time
 821     // for completing the marking, i.e. are faster than expected.
 822     // This skews the predicted marking length towards smaller values which might cause
 823     // the mark start being too late.
 824     _concurrent_start_to_mixed.reset();
 825   }
 826 
 827   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
 828   double scan_logged_cards_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
 829 
 830   if (scan_logged_cards_time_goal_ms < merge_hcc_time_ms) {
 831     log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal)."
 832                                 "Logged Cards Scan time goal: %1.2fms Scan HCC time: %1.2fms",
 833                                 scan_logged_cards_time_goal_ms, merge_hcc_time_ms);


 843                               scan_logged_cards_time_goal_ms, logged_cards_time, merge_hcc_time_ms);
 844 
 845   _g1h->concurrent_refine()->adjust(logged_cards_time,
 846                                     phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards),
 847                                     scan_logged_cards_time_goal_ms);
 848 }
 849 
 850 G1IHOPControl* G1Policy::create_ihop_control(const G1Predictions* predictor){
 851   if (G1UseAdaptiveIHOP) {
 852     return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
 853                                      predictor,
 854                                      G1ReservePercent,
 855                                      G1HeapWastePercent);
 856   } else {
 857     return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent);
 858   }
 859 }
 860 
 861 void G1Policy::update_ihop_prediction(double mutator_time_s,
 862                                       size_t mutator_alloc_bytes,
 863                                       size_t young_gen_size,
 864                                       bool this_gc_was_young_only) {
 865   // Always try to update IHOP prediction. Even evacuation failures give information
 866   // about e.g. whether to start IHOP earlier next time.
 867 
 868   // Avoid using really small application times that might create samples with
 869   // very high or very low values. They may be caused by e.g. back-to-back gcs.
 870   double const min_valid_time = 1e-6;
 871 
 872   bool report = false;
 873 
 874   double marking_to_mixed_time = -1.0;
 875   if (!this_gc_was_young_only && _concurrent_start_to_mixed.has_result()) {
 876     marking_to_mixed_time = _concurrent_start_to_mixed.last_marking_time();
 877     assert(marking_to_mixed_time > 0.0,
 878            "Concurrent start to mixed time must be larger than zero but is %.3f",
 879            marking_to_mixed_time);
 880     if (marking_to_mixed_time > min_valid_time) {
 881       _ihop_control->update_marking_length(marking_to_mixed_time);
 882       report = true;
 883     }
 884   }
 885 
 886   // As an approximation for the young gc promotion rates during marking we use
 887   // all of them. In many applications there are only a few if any young gcs during
 888   // marking, which makes any prediction useless. This increases the accuracy of the
 889   // prediction.
 890   if (this_gc_was_young_only && mutator_time_s > min_valid_time) {





 891     _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size);
 892     report = true;
 893   }
 894 
 895   if (report) {
 896     report_ihop_statistics();
 897   }
 898 }
 899 
 900 void G1Policy::report_ihop_statistics() {
 901   _ihop_control->print();
 902 }
 903 
 904 void G1Policy::print_phases() {
 905   phase_times()->print();
 906 }
 907 
 908 double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards,
 909                                               size_t rs_length) const {
 910   size_t effective_scanned_cards = _analytics->predict_scan_card_num(rs_length, collector_state()->in_young_only_phase());


 977 
 978 bool G1Policy::can_expand_young_list() const {
 979   uint young_list_length = _g1h->young_regions_count();
 980   uint young_list_max_length = _young_list_max_length;
 981   return young_list_length < young_list_max_length;
 982 }
 983 
 984 bool G1Policy::use_adaptive_young_list_length() const {
 985   return _young_gen_sizer->use_adaptive_young_list_length();
 986 }
 987 
 988 size_t G1Policy::desired_survivor_size(uint max_regions) const {
 989   size_t const survivor_capacity = HeapRegion::GrainWords * max_regions;
 990   return (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);
 991 }
 992 
 993 void G1Policy::print_age_table() {
 994   _survivors_age_table.print_age_table(_tenuring_threshold);
 995 }
 996 
 997 void G1Policy::update_max_gc_locker_expansion() {
 998   uint expansion_region_num = 0;
 999   if (GCLockerEdenExpansionPercent > 0) {
1000     double perc = (double) GCLockerEdenExpansionPercent / 100.0;
1001     double expansion_region_num_d = perc * (double) _young_list_target_length;
1002     // We use ceiling so that if expansion_region_num_d is > 0.0 (but
1003     // less than 1.0) we'll get 1.
1004     expansion_region_num = (uint) ceil(expansion_region_num_d);
1005   } else {
1006     assert(expansion_region_num == 0, "sanity");
1007   }
1008   _young_list_max_length = _young_list_target_length + expansion_region_num;
1009   assert(_young_list_target_length <= _young_list_max_length, "post-condition");

1010 }
1011 
1012 // Calculates survivor space parameters.
1013 void G1Policy::update_survivors_policy() {
1014   double max_survivor_regions_d =
1015                  (double) _young_list_target_length / (double) SurvivorRatio;
1016 
1017   // Calculate desired survivor size based on desired max survivor regions (unconstrained
1018   // by remaining heap). Otherwise we may cause undesired promotions as we are
1019   // already getting close to end of the heap, impacting performance even more.
1020   uint const desired_max_survivor_regions = ceil(max_survivor_regions_d);
1021   size_t const survivor_size = desired_survivor_size(desired_max_survivor_regions);
1022 
1023   _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(survivor_size);
1024   if (UsePerfData) {
1025     _policy_counters->tenuring_threshold()->set_value(_tenuring_threshold);
1026     _policy_counters->desired_survivor_size()->set_value(survivor_size * oopSize);
1027   }
1028   // The real maximum survivor size is bounded by the number of regions that can
1029   // be allocated into.


1224       if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
1225         _concurrent_start_to_mixed.record_concurrent_start_end(end);
1226       }
1227       break;
1228     case MixedGC:
1229       _concurrent_start_to_mixed.record_mixed_gc_start(start);
1230       break;
1231     default:
1232       ShouldNotReachHere();
1233   }
1234 }
1235 
1236 void G1Policy::abort_time_to_mixed_tracking() {
1237   _concurrent_start_to_mixed.reset();
1238 }
1239 
// Decides whether the next GC should be mixed: requires remaining old
// candidate regions whose reclaimable space exceeds G1HeapWastePercent of
// the heap. The action strings are only used for the ergo log messages.
1240 bool G1Policy::next_gc_should_be_mixed(const char* true_action_str,
1241                                        const char* false_action_str) const {
1242   G1CollectionSetCandidates* candidates = _collection_set->candidates();
1243 
     // No old regions left over from the last marking: nothing to mix in.
1244   if (candidates->is_empty()) {

1245     log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str);

1246     return false;
1247   }
1248 
1249   // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
1250   size_t reclaimable_bytes = candidates->remaining_reclaimable_bytes();
1251   double reclaimable_percent = reclaimable_bytes_percent(reclaimable_bytes);
1252   double threshold = (double) G1HeapWastePercent;
1253   if (reclaimable_percent <= threshold) {

1254     log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
1255                         false_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);

1256     return false;
1257   }

1258   log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
1259                       true_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);

1260   return true;
1261 }
1262 
1263 uint G1Policy::calc_min_old_cset_length() const {
1264   // The min old CSet region bound is based on the maximum desired
1265   // number of mixed GCs after a cycle. I.e., even if some old regions
1266   // look expensive, we should add them to the CSet anyway to make
1267   // sure we go through the available old regions in no more than the
1268   // maximum desired number of mixed GCs.
1269   //
1270   // The calculation is based on the number of marked regions we added
1271   // to the CSet candidates in the first place, not how many remain, so
1272   // that the result is the same during all mixed GCs that follow a cycle.
1273 
1274   const size_t region_num = _collection_set->candidates()->num_regions();
1275   const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
1276   size_t result = region_num / gc_num;
1277   // emulate ceiling
1278   if (result * gc_num < region_num) {
1279     result += 1;




  29 #include "gc/g1/g1CollectionSet.hpp"
  30 #include "gc/g1/g1CollectionSetCandidates.hpp"
  31 #include "gc/g1/g1ConcurrentMark.hpp"
  32 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
  33 #include "gc/g1/g1ConcurrentRefine.hpp"
  34 #include "gc/g1/g1ConcurrentRefineStats.hpp"
  35 #include "gc/g1/g1CollectionSetChooser.hpp"
  36 #include "gc/g1/g1HeterogeneousHeapPolicy.hpp"
  37 #include "gc/g1/g1HotCardCache.hpp"
  38 #include "gc/g1/g1IHOPControl.hpp"
  39 #include "gc/g1/g1GCPhaseTimes.hpp"
  40 #include "gc/g1/g1Policy.hpp"
  41 #include "gc/g1/g1SurvivorRegions.hpp"
  42 #include "gc/g1/g1YoungGenSizer.hpp"
  43 #include "gc/g1/heapRegion.inline.hpp"
  44 #include "gc/g1/heapRegionRemSet.hpp"
  45 #include "gc/shared/concurrentGCBreakpoints.hpp"
  46 #include "gc/shared/gcPolicyCounters.hpp"
  47 #include "logging/log.hpp"
  48 #include "runtime/arguments.hpp"
  49 #include "runtime/globals.hpp"
  50 #include "runtime/java.hpp"
  51 #include "runtime/mutexLocker.hpp"
  52 #include "utilities/debug.hpp"
  53 #include "utilities/growableArray.hpp"
  54 #include "utilities/pair.hpp"
  55 
  56 G1Policy::G1Policy(STWGCTimer* gc_timer) :
  57   _predictor(G1ConfidencePercent / 100.0),
  58   _analytics(new G1Analytics(&_predictor)),
  59   _remset_tracker(),
  60   _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
  61   _ihop_control(create_ihop_control(&_predictor)),
  62   _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
  63   _full_collection_start_sec(0.0),
  64   _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC),
  65   _young_list_desired_length(0),
  66   _young_list_target_length(0),

  67   _young_list_max_length(0),
  68   _eden_surv_rate_group(new G1SurvRateGroup()),
  69   _survivor_surv_rate_group(new G1SurvRateGroup()),
  70   _reserve_factor((double) G1ReservePercent / 100.0),
  71   _reserve_regions(0),
  72   _young_gen_sizer(G1YoungGenSizer::create_gen_sizer()),
  73   _free_regions_at_end_of_collection(0),
  74   _rs_length(0),
  75   _rs_length_prediction(0),
  76   _pending_cards_at_gc_start(0),
  77   _old_gen_alloc_tracker(),
  78   _concurrent_start_to_mixed(),
  79   _collection_set(NULL),
  80   _g1h(NULL),
  81   _phase_times_timer(gc_timer),
  82   _phase_times(NULL),
  83   _mark_remark_start_sec(0),
  84   _mark_cleanup_start_sec(0),
  85   _tenuring_threshold(MaxTenuringThreshold),
  86   _max_survivor_regions(0),


  92   delete _ihop_control;
  93   delete _young_gen_sizer;
  94 }
  95 
  96 G1Policy* G1Policy::create_policy(STWGCTimer* gc_timer_stw) {
  97   if (G1Arguments::is_heterogeneous_heap()) {
  98     return new G1HeterogeneousHeapPolicy(gc_timer_stw);
  99   } else {
 100     return new G1Policy(gc_timer_stw);
 101   }
 102 }
 103 
// Convenience accessor for the heap's shared collector state.
G1CollectorState* G1Policy::collector_state() const { return _g1h->collector_state(); }
 105 
// Late initialization: wires up the heap and collection set pointers and
// establishes the initial young gen length bounds. Must be called with the
// Heap_lock held (asserted below), before any region allocation happens.
void G1Policy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {
  _g1h = g1h;
  _collection_set = collection_set;

  assert(Heap_lock->owned_by_self(), "Locking discipline.");

  // Cap the sizer's maximum young size by how far the heap may still expand.
  _young_gen_sizer->adjust_max_new_size(_g1h->max_expandable_regions());

  _free_regions_at_end_of_collection = _g1h->num_free_regions();

  update_young_length_bounds();
  // We may immediately start allocating regions and placing them on the
  // collection set list. Initialize the per-collection set info
  _collection_set->start_incremental_building();
}
 121 
// Marks the start of a GC pause in the phase times bookkeeping.
void G1Policy::note_gc_start() {
  phase_times()->note_gc_start();
}
 125 
 126 class G1YoungLengthPredictor {
 127   const double _base_time_ms;
 128   const double _base_free_regions;
 129   const double _target_pause_time_ms;
 130   const G1Policy* const _policy;
 131 
 132  public:
 133   G1YoungLengthPredictor(double base_time_ms,
 134                          double base_free_regions,
 135                          double target_pause_time_ms,
 136                          const G1Policy* policy) :


 171       return false;
 172     }
 173 
 174     // success!
 175     return true;
 176   }
 177 };
 178 
// Reacts to a change of the committed heap size: recomputes the number of
// reserve regions, informs the young gen sizer, and updates the IHOP target
// occupancy (in bytes).
void G1Policy::record_new_heap_size(uint new_number_of_regions) {
  // re-calculate the necessary reserve
  double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
  // We use ceiling so that if reserve_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _reserve_regions = (uint) ceil(reserve_regions_d);

  _young_gen_sizer->heap_size_changed(new_number_of_regions);

  _ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes);
}
 190 
// Minimum eden length needed to keep the MMU goal: eden must be able to absorb
// the predicted allocation until the earliest time the MMU tracker allows the
// next GC. Returns 0 when the young list length is not adaptive.
uint G1Policy::calculate_desired_eden_length_by_mmu() const {
  // One could argue that any useful eden length to keep any MMU would be 1, but
  // in theory this is possible. Other constraints enforce a minimum eden of 1
  // anyway.
  uint desired_min_length = 0;
  if (use_adaptive_young_list_length()) {
    double now_sec = os::elapsedTime();
    // Earliest point (in ms from now) at which the MMU goal permits the next GC.
    double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
    // Predicted allocation rate; presumably regions per ms — units follow from
    // the multiplication below yielding a region count.
    double alloc_rate_ms = _analytics->predict_alloc_rate_ms();
    desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
  }
  return desired_min_length;
}
 204 
// Recomputes the young gen length bounds using the analytics' current
// remembered set length prediction.
void G1Policy::update_young_length_bounds() {
  update_young_length_bounds(_analytics->predict_rs_length());
}
 208 
 209 void G1Policy::update_young_length_bounds(size_t rs_length) {
 210   _young_list_desired_length = calculate_young_desired_length(rs_length);
 211   _young_list_target_length = calculate_young_target_length(_young_list_desired_length);
 212   _young_list_max_length = calculate_young_max_length(_young_list_target_length);
 213 
 214   log_debug(gc,ergo,heap)("Young list lengths: desired: %u, target: %u, max: %u",
 215                           _young_list_desired_length,
 216                           _young_list_target_length,
 217                           _young_list_max_length);
 218 }
 219 
 220 // Calculates desired young gen length. It is calculated from:
 221 //
 222 // - sizer min/max bounds on young gen
 223 // - pause time goal for whole young gen evacuation
 224 // - MMU goal influencing eden to make GCs spaced apart.
 225 // - a minimum one eden region length.
 226 //
 227 // We may enter with already allocated eden and survivor regions, that may be
 228 // higher than the maximum, or the above goals may result in a desired value
 229 // smaller than are already allocated.
 230 // The main reason is revising young length, with our without the GCLocker being
 231 // active.
 232 //
 233 uint G1Policy::calculate_young_desired_length(size_t rs_length) const {
 234   uint min_young_length_by_sizer = _young_gen_sizer->min_desired_young_length();
 235   uint max_young_length_by_sizer = _young_gen_sizer->max_desired_young_length();
 236 
 237   assert(min_young_length_by_sizer >= 1, "invariant");
 238   assert(max_young_length_by_sizer >= min_young_length_by_sizer, "invariant");
 239 
 240   // Absolute minimum eden length.
 241   // Enforcing a minimum eden length helps at startup when the predictors are not
 242   // yet trained on the application to avoid unnecessary (but very short) full gcs
 243   // on very small (initial) heaps.
 244   uint const MinDesiredEdenLength = 1;
 245 
 246   // Calculate the absolute and desired min bounds first.
 247 
 248   // This is how many survivor regions we already have.
 249   const uint survivor_length = _g1h->survivor_regions_count();
 250   // Size of the already allocated young gen.
 251   const uint allocated_young_length = _g1h->young_regions_count();
 252   // This is the absolute minimum young length that we can return. Ensure that we
 253   // don't go below any user-defined minimum bound; but we might have already
 254   // allocated more than that for reasons. In this case, use that.
 255   uint absolute_min_young_length = MAX2(allocated_young_length, min_young_length_by_sizer);
 256   // Calculate the absolute max bounds. After evac failure or when revising the
 257   // young length we might have exceeded absolute min length or absolute_max_length,
 258   // so adjust the result accordingly.
 259   uint absolute_max_young_length = MAX2(max_young_length_by_sizer, absolute_min_young_length);
 260 
 261   uint desired_eden_length_by_mmu = 0;
 262   uint desired_eden_length_by_pause = 0;
 263   uint desired_eden_length_before_mixed = 0;
 264 
 265   uint desired_young_length = 0;
 266   if (use_adaptive_young_list_length()) {
 267     desired_eden_length_by_mmu = calculate_desired_eden_length_by_mmu();
 268 
 269     const size_t pending_cards = _analytics->predict_pending_cards();
 270     double survivor_base_time_ms = predict_base_elapsed_time_ms(pending_cards, rs_length);
 271 
 272     if (!next_gc_should_be_mixed(NULL, NULL)) {
 273       desired_eden_length_by_pause =
 274         calculate_desired_eden_length_by_pause(survivor_base_time_ms,
 275                                                absolute_min_young_length - survivor_length,
 276                                                absolute_max_young_length - survivor_length);
 277     } else {
 278       desired_eden_length_before_mixed =
 279         calculate_desired_eden_length_before_mixed(survivor_base_time_ms,
 280                                                    absolute_min_young_length - survivor_length,
 281                                                    absolute_max_young_length - survivor_length);
 282     }
 283     // Above either sets desired_eden_length_by_pause or desired_eden_length_before_mixed,
 284     // the other is zero. Use the one that has been set below.
 285     uint desired_eden_length = MAX2(desired_eden_length_by_pause,
 286                                     desired_eden_length_before_mixed);
 287 
 288     // Finally incorporate MMU concerns; assume that it overrides the pause time
 289     // goal, as the default value has been chosen to effectively disable it.
 290     // Also request at least one eden region, see above for reasons.
 291     desired_eden_length = MAX3(desired_eden_length,
 292                                desired_eden_length_by_mmu,
 293                                MinDesiredEdenLength);
 294 
 295     desired_young_length = desired_eden_length + survivor_length;
 296   } else {
 297     // The user asked for a fixed young gen so we'll fix the young gen
 298     // whether the next GC is young or mixed.
 299     desired_young_length = min_young_length_by_sizer;
 300   }
 301   // Clamp to absolute min/max after we determined desired lengths.
 302   desired_young_length = clamp(desired_young_length, absolute_min_young_length, absolute_max_young_length);
 303 
 304   log_trace(gc, ergo, heap)("Young desired length %u "
 305                             "survivor length %u "
 306                             "allocated young length %u "
 307                             "absolute min young length %u "
 308                             "absolute max young length %u "
 309                             "desired eden length by mmu %u "
 310                             "desired eden length by pause %u "
 311                             "desired eden length before mixed %u"
 312                             "desired eden length by default %u",
 313                             desired_young_length, survivor_length,
 314                             allocated_young_length, absolute_min_young_length,
 315                             absolute_max_young_length, desired_eden_length_by_mmu,
 316                             desired_eden_length_by_pause,
 317                             desired_eden_length_before_mixed,
 318                             MinDesiredEdenLength);
 319 
 320   assert(desired_young_length >= allocated_young_length, "must be");
 321   return desired_young_length;
 322 }
 323 
// Limit the desired (wished) young length by current free regions. If the request
// can be satisfied without using up reserve regions, do so, otherwise eat into
// the reserve, giving away at most what the heap sizer allows.
//
// The result is expressed as allocated young regions plus the additional eden
// regions we are willing to hand out, so it is never below what is already
// allocated (asserted at the end).
uint G1Policy::calculate_young_target_length(uint desired_young_length) const {
  uint allocated_young_length = _g1h->young_regions_count();

  uint receiving_additional_eden;
  if (allocated_young_length >= desired_young_length) {
    // Already used up all we actually want (may happen as G1 revises the
    // young list length concurrently, or caused by gclocker). Do not allow more,
    // potentially resulting in GC.
    receiving_additional_eden = 0;
    log_trace(gc, ergo, heap)("Young target length: Already used up desired young %u allocated %u",
                              desired_young_length,
                              allocated_young_length);
  } else {
    // Now look at how many free regions are there currently, and the heap reserve.
    // We will try our best not to "eat" into the reserve as long as we can. If we
    // do, we at most eat the sizer's minimum regions into the reserve or half the
    // reserve rounded up (if possible; this is an arbitrary value).

    uint max_to_eat_into_reserve = MIN2(_young_gen_sizer->min_desired_young_length(),
                                        (_reserve_regions + 1) / 2);

    log_trace(gc, ergo, heap)("Young target length: Common "
                              "free regions at end of collection %u "
                              "desired young length %u "
                              "reserve region %u "
                              "max to eat into reserve %u",
                              _free_regions_at_end_of_collection,
                              desired_young_length,
                              _reserve_regions,
                              max_to_eat_into_reserve);

    // Three cases, by how the free regions relate to the reserve:
    if (_free_regions_at_end_of_collection <= _reserve_regions) {
      // Fully eat (or already eating) into the reserve, hand back at most absolute_min_length regions.
      uint receiving_young = MIN3(_free_regions_at_end_of_collection,
                                  desired_young_length,
                                  max_to_eat_into_reserve);
      // We could already have allocated more regions than what we could get
      // above.
      receiving_additional_eden = allocated_young_length < receiving_young ?
                                  receiving_young - allocated_young_length : 0;

      log_trace(gc, ergo, heap)("Young target length: Fully eat into reserve "
                                "receiving young %u receiving additional eden %u",
                                receiving_young,
                                receiving_additional_eden);
    } else if (_free_regions_at_end_of_collection < (desired_young_length + _reserve_regions)) {
      // Partially eat into the reserve, at most max_to_eat_into_reserve regions.
      uint free_outside_reserve = _free_regions_at_end_of_collection - _reserve_regions;
      assert(free_outside_reserve < desired_young_length,
             "must be %u %u",
             free_outside_reserve, desired_young_length);

      uint receiving_within_reserve = MIN2(desired_young_length - free_outside_reserve,
                                           max_to_eat_into_reserve);
      uint receiving_young = free_outside_reserve + receiving_within_reserve;
      // Again, we could have already allocated more than we could get.
      receiving_additional_eden = allocated_young_length < receiving_young ?
                                  receiving_young - allocated_young_length : 0;

      log_trace(gc, ergo, heap)("Young target length: Partially eat into reserve "
                                "free outside reserve %u "
                                "receiving within reserve %u "
                                "receiving young %u "
                                "receiving additional eden %u",
                                free_outside_reserve, receiving_within_reserve,
                                receiving_young, receiving_additional_eden);
    } else {
      // No need to use the reserve.
      receiving_additional_eden = desired_young_length - allocated_young_length;
      log_trace(gc, ergo, heap)("Young target length: No need to use reserve "
                                "receiving additional eden %u",
                                receiving_additional_eden);
    }
  }

  uint target_young_length = allocated_young_length + receiving_additional_eden;

  assert(target_young_length >= allocated_young_length, "must be");

  log_trace(gc, ergo, heap)("Young target length: "
                            "young target length %u "
                            "allocated young length %u "
                            "received additional eden %u",
                            target_young_length, allocated_young_length,
                            receiving_additional_eden);
  return target_young_length;
}
 414 
// Finds the largest eden length within [min_eden_length, max_eden_length] whose
// predicted evacuation (on top of base_time_ms) still fits the pause time goal,
// via binary search. If even min_eden_length does not fit, it is returned anyway.
uint G1Policy::calculate_desired_eden_length_by_pause(double base_time_ms,
                                                      uint min_eden_length,
                                                      uint max_eden_length) const {
  assert(use_adaptive_young_list_length(), "pre-condition");

  assert(min_eden_length <= max_eden_length, "must be %u %u", min_eden_length, max_eden_length);

  // Here, we will make sure that the shortest young length that
  // makes sense fits within the target pause time.

  G1YoungLengthPredictor p(base_time_ms,
                           _free_regions_at_end_of_collection,
                           _mmu_tracker->max_gc_time() * 1000.0,
                           this);
  if (p.will_fit(min_eden_length)) {
    // The shortest young length will fit into the target pause time;
    // we'll now check whether the absolute maximum number of young
    // regions will fit in the target pause time. If not, we'll do
    // a binary search between min_young_length and max_young_length.
    if (p.will_fit(max_eden_length)) {
      // The maximum young length will fit into the target pause time.
      // We are done so set min young length to the maximum length (as
      // the result is assumed to be returned in min_young_length).
      min_eden_length = max_eden_length;
    } else {
      // The maximum possible number of young regions will not fit within
      // the target pause time so we'll search for the optimal
      // length. The loop invariants are:
      //
      // min_young_length < max_young_length
      // min_young_length is known to fit into the target pause time
      // max_young_length is known not to fit into the target pause time
      //
      // Going into the loop we know the above hold as we've just
      // checked them. Every time around the loop we check whether
      // the middle value between min_young_length and
      // max_young_length fits into the target pause time. If it
      // does, it becomes the new min. If it doesn't, it becomes
      // the new max. This way we maintain the loop invariants.

      assert(min_eden_length < max_eden_length, "invariant");
      uint diff = (max_eden_length - min_eden_length) / 2;
      while (diff > 0) {
        uint eden_length = min_eden_length + diff;
        if (p.will_fit(eden_length)) {
          min_eden_length = eden_length;
        } else {
          max_eden_length = eden_length;
        }
        assert(min_eden_length <  max_eden_length, "invariant");
        diff = (max_eden_length - min_eden_length) / 2;
      }
      // The results is min_young_length which, according to the
      // loop invariants, should fit within the target pause time.

      // These are the post-conditions of the binary search above:
      assert(min_eden_length < max_eden_length,
             "otherwise we should have discovered that max_eden_length "
             "fits into the pause target and not done the binary search");
      assert(p.will_fit(min_eden_length),
             "min_eden_length, the result of the binary search, should "
             "fit into the pause target");
      assert(!p.will_fit(min_eden_length + 1),
             "min_eden_length, the result of the binary search, should be "
             "optimal, so no larger length should fit into the pause target");
    }
  } else {
    // Even the minimum length doesn't fit into the pause time
    // target, return it as the result nevertheless.
  }
  return min_eden_length;
}
 487 
// Desired eden length for the last young-only collection before a mixed phase:
// adds the predicted evacuation time of the minimum number of old candidate
// regions (the mandatory per-mixed-gc quota) on top of the survivor base time,
// then sizes eden by the pause time goal as usual.
uint G1Policy::calculate_desired_eden_length_before_mixed(double survivor_base_time_ms,
                                                          uint min_eden_length,
                                                          uint max_eden_length) const {
  G1CollectionSetCandidates* candidates = _collection_set->candidates();

  // Do not walk past the end of the candidate list.
  uint min_old_regions_end = MIN2(candidates->cur_idx() + calc_min_old_cset_length(), candidates->num_regions());
  double predicted_region_evac_time_ms = survivor_base_time_ms;
  for (uint i = candidates->cur_idx(); i < min_old_regions_end; i++) {
    HeapRegion* r = candidates->at(i);
    predicted_region_evac_time_ms += predict_region_total_time_ms(r, false);
  }
  uint desired_eden_length_by_min_cset_length =
     calculate_desired_eden_length_by_pause(predicted_region_evac_time_ms,
                                            min_eden_length,
                                            max_eden_length);

  return desired_eden_length_by_min_cset_length;
}
 506 
// Sums the predicted total evacuation time (ms) over all current survivor
// regions; the per-region prediction depends on whether we are in the
// young-only phase.
double G1Policy::predict_survivor_regions_evac_time() const {
  double survivor_regions_evac_time = 0.0;
  const GrowableArray<HeapRegion*>* survivor_regions = _g1h->survivor()->regions();
  for (GrowableArrayIterator<HeapRegion*> it = survivor_regions->begin();
       it != survivor_regions->end();
       ++it) {
    survivor_regions_evac_time += predict_region_total_time_ms(*it, collector_state()->in_young_only_phase());
  }
  return survivor_regions_evac_time;
}
 517 
// Lazily-constructed accessor for the per-pause phase timing data.
G1GCPhaseTimes* G1Policy::phase_times() const {
  // Lazy allocation because it must follow initialization of all the
  // OopStorage objects by various other subsystems.
  if (_phase_times == NULL) {
    _phase_times = new G1GCPhaseTimes(_phase_times_timer, ParallelGCThreads);
  }
  return _phase_times;
}
 526 
// Re-evaluates the young gen length bounds during the mutator phase when the
// observed remembered set length exceeds the current prediction. Only valid
// with an adaptive young list length.
void G1Policy::revise_young_list_target_length_if_necessary(size_t rs_length) {
  guarantee(use_adaptive_young_list_length(), "should not call this otherwise" );

  if (rs_length > _rs_length_prediction) {
    // add 10% to avoid having to recalculate often
    size_t rs_length_prediction = rs_length * 1100 / 1000;
    update_rs_length_prediction(rs_length_prediction);
    update_young_length_bounds(rs_length_prediction);
  }
}
 537 
// Refreshes the cached remembered set length prediction from the analytics.
void G1Policy::update_rs_length_prediction() {
  update_rs_length_prediction(_analytics->predict_rs_length());
}
 541 
// Stores the given prediction, but only while in the young-only phase with an
// adaptive young list length; otherwise the value is deliberately ignored.
void G1Policy::update_rs_length_prediction(size_t prediction) {
  if (collector_state()->in_young_only_phase() && use_adaptive_young_list_length()) {
    _rs_length_prediction = prediction;
  }
}
 547 
 548 void G1Policy::record_full_collection_start() {
 549   _full_collection_start_sec = os::elapsedTime();
 550   // Release the future to-space so that it is available for compaction into.
 551   collector_state()->set_in_young_only_phase(false);
 552   collector_state()->set_in_full_gc(true);
 553   _collection_set->clear_candidates();
 554   _pending_cards_at_gc_start = 0;


 562   double full_gc_time_ms = full_gc_time_sec * 1000.0;
 563 
 564   _analytics->update_recent_gc_times(end_sec, full_gc_time_ms);
 565 
 566   collector_state()->set_in_full_gc(false);
 567 
 568   // "Nuke" the heuristics that control the young/mixed GC
 569   // transitions and make sure we start with young GCs after the Full GC.
 570   collector_state()->set_in_young_only_phase(true);
 571   collector_state()->set_in_young_gc_before_mixed(false);
 572   collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
 573   collector_state()->set_in_concurrent_start_gc(false);
 574   collector_state()->set_mark_or_rebuild_in_progress(false);
 575   collector_state()->set_clearing_next_bitmap(false);
 576 
 577   _eden_surv_rate_group->start_adding_regions();
 578   // also call this on any additional surv rate groups
 579 
 580   _free_regions_at_end_of_collection = _g1h->num_free_regions();
 581   _survivor_surv_rate_group->reset();
 582   update_young_length_bounds();
 583   update_rs_length_prediction();
 584 
 585   _old_gen_alloc_tracker.reset_after_full_gc();
 586 
 587   record_pause(FullGC, _full_collection_start_sec, end_sec);
 588 }
 589 
// File-local helper: emits one debug line of refinement statistics under the
// gc+refine+stats tags, labeled by 'kind' (e.g. which refinement source).
static void log_refinement_stats(const char* kind, const G1ConcurrentRefineStats& stats) {
  log_debug(gc, refine, stats)
           ("%s refinement: %.2fms, refined: " SIZE_FORMAT
            ", precleaned: " SIZE_FORMAT ", dirtied: " SIZE_FORMAT,
            kind,
            stats.refinement_time().seconds() * MILLIUNITS,
            stats.refined_cards(),
            stats.precleaned_cards(),
            stats.dirtied_cards());
}
 600 
 601 void G1Policy::record_concurrent_refinement_stats() {
 602   G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();


 896     // During mixed gc we do not use them for young gen sizing.
 897     if (is_young_only_pause(this_pause)) {
 898       _analytics->report_pending_cards((double) _pending_cards_at_gc_start);
 899       _analytics->report_rs_length((double) _rs_length);
 900     }
 901   }
 902 
 903   assert(!(is_concurrent_start_pause(this_pause) && collector_state()->mark_or_rebuild_in_progress()),
 904          "If the last pause has been concurrent start, we should not have been in the marking window");
 905   if (is_concurrent_start_pause(this_pause)) {
 906     collector_state()->set_mark_or_rebuild_in_progress(true);
 907   }
 908 
 909   _free_regions_at_end_of_collection = _g1h->num_free_regions();
 910 
 911   update_rs_length_prediction();
 912 
 913   // Do not update dynamic IHOP due to G1 periodic collection as it is highly likely
 914   // that in this case we are not running in a "normal" operating mode.
 915   if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
 916     update_young_length_bounds();




 917 
 918     _old_gen_alloc_tracker.reset_after_young_gc(app_time_ms / 1000.0);
 919     update_ihop_prediction(_old_gen_alloc_tracker.last_cycle_duration(),
 920                            _old_gen_alloc_tracker.last_cycle_old_bytes(),

 921                            is_young_only_pause(this_pause));
 922 
 923     _ihop_control->send_trace_event(_g1h->gc_tracer_stw());
 924   } else {
 925     // Any garbage collection triggered as periodic collection resets the time-to-mixed
 926     // measurement. Periodic collection typically means that the application is "inactive", i.e.
 927     // the marking threads may have received an uncharacterisic amount of cpu time
 928     // for completing the marking, i.e. are faster than expected.
 929     // This skews the predicted marking length towards smaller values which might cause
 930     // the mark start being too late.
 931     _concurrent_start_to_mixed.reset();
 932   }
 933 
 934   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
 935   double scan_logged_cards_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
 936 
 937   if (scan_logged_cards_time_goal_ms < merge_hcc_time_ms) {
 938     log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal)."
 939                                 "Logged Cards Scan time goal: %1.2fms Scan HCC time: %1.2fms",
 940                                 scan_logged_cards_time_goal_ms, merge_hcc_time_ms);


 950                               scan_logged_cards_time_goal_ms, logged_cards_time, merge_hcc_time_ms);
 951 
 952   _g1h->concurrent_refine()->adjust(logged_cards_time,
 953                                     phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards),
 954                                     scan_logged_cards_time_goal_ms);
 955 }
 956 
 957 G1IHOPControl* G1Policy::create_ihop_control(const G1Predictions* predictor){
 958   if (G1UseAdaptiveIHOP) {
 959     return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
 960                                      predictor,
 961                                      G1ReservePercent,
 962                                      G1HeapWastePercent);
 963   } else {
 964     return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent);
 965   }
 966 }
 967 
// Feeds the latest cycle's measurements into the IHOP control:
// - mutator_time_s / mutator_alloc_bytes: application time and old-gen
//   allocation since the last update (allocation-rate sample),
// - this_gc_was_young_only: whether the just-finished gc was young-only.
// Also forwards the last concurrent-start-to-mixed duration when available,
// and prints IHOP statistics if anything was reported.
void G1Policy::update_ihop_prediction(double mutator_time_s,
                                      size_t mutator_alloc_bytes,
                                      bool this_gc_was_young_only) {
  // Always try to update IHOP prediction. Even evacuation failures give information
  // about e.g. whether to start IHOP earlier next time.

  // Avoid using really small application times that might create samples with
  // very high or very low values. They may be caused by e.g. back-to-back gcs.
  double const min_valid_time = 1e-6;

  bool report = false;

  double marking_to_mixed_time = -1.0;
  if (!this_gc_was_young_only && _concurrent_start_to_mixed.has_result()) {
    marking_to_mixed_time = _concurrent_start_to_mixed.last_marking_time();
    assert(marking_to_mixed_time > 0.0,
           "Concurrent start to mixed time must be larger than zero but is %.3f",
           marking_to_mixed_time);
    if (marking_to_mixed_time > min_valid_time) {
      _ihop_control->update_marking_length(marking_to_mixed_time);
      report = true;
    }
  }

  // As an approximation for the young gc promotion rates during marking we use
  // all of them. In many applications there are only a few if any young gcs during
  // marking, which makes any prediction useless. This increases the accuracy of the
  // prediction.
  if (this_gc_was_young_only && mutator_time_s > min_valid_time) {
    // IHOP control wants to know the expected young gen length if it were not
    // restrained by the heap reserve. Using the actual length would make the
    // prediction too small and the limit the young gen every time we get to the
    // predicted target occupancy.
    size_t young_gen_size = young_list_desired_length() * HeapRegion::GrainBytes;
    _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size);
    report = true;
  }

  if (report) {
    report_ihop_statistics();
  }
}
1010 
// Prints the IHOP control's current statistics (logging side effect only).
void G1Policy::report_ihop_statistics() {
  _ihop_control->print();
}
1014 
// Prints the phase timing data for the current pause.
void G1Policy::print_phases() {
  phase_times()->print();
}
1018 
1019 double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards,
1020                                               size_t rs_length) const {
1021   size_t effective_scanned_cards = _analytics->predict_scan_card_num(rs_length, collector_state()->in_young_only_phase());


1088 
1089 bool G1Policy::can_expand_young_list() const {
1090   uint young_list_length = _g1h->young_regions_count();
1091   uint young_list_max_length = _young_list_max_length;
1092   return young_list_length < young_list_max_length;
1093 }
1094 
// Whether the young gen length is sized adaptively (delegates to the sizer).
bool G1Policy::use_adaptive_young_list_length() const {
  return _young_gen_sizer->use_adaptive_young_list_length();
}
1098 
// Desired survivor space occupancy in words for the given number of survivor
// regions: TargetSurvivorRatio percent of their total capacity.
size_t G1Policy::desired_survivor_size(uint max_regions) const {
  size_t const survivor_capacity = HeapRegion::GrainWords * max_regions;
  return (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);
}
1103 
1104 void G1Policy::print_age_table() {
1105   _survivors_age_table.print_age_table(_tenuring_threshold);
1106 }
1107 
1108 uint G1Policy::calculate_young_max_length(uint target_young_length) const {
1109   uint expansion_region_num = 0;
1110   if (GCLockerEdenExpansionPercent > 0) {
1111     double perc = (double) GCLockerEdenExpansionPercent / 100.0;
1112     double expansion_region_num_d = perc * (double) _young_list_target_length;
1113     // We use ceiling so that if expansion_region_num_d is > 0.0 (but
1114     // less than 1.0) we'll get 1.
1115     expansion_region_num = (uint) ceil(expansion_region_num_d);
1116   } else {
1117     assert(expansion_region_num == 0, "sanity");
1118   }
1119   uint max_length = target_young_length + expansion_region_num;
1120   assert(target_young_length <= max_length, "post-condition");
1121   return max_length;
1122 }
1123 
1124 // Calculates survivor space parameters.
1125 void G1Policy::update_survivors_policy() {
1126   double max_survivor_regions_d =
1127                  (double) _young_list_target_length / (double) SurvivorRatio;
1128 
1129   // Calculate desired survivor size based on desired max survivor regions (unconstrained
1130   // by remaining heap). Otherwise we may cause undesired promotions as we are
1131   // already getting close to end of the heap, impacting performance even more.
1132   uint const desired_max_survivor_regions = ceil(max_survivor_regions_d);
1133   size_t const survivor_size = desired_survivor_size(desired_max_survivor_regions);
1134 
1135   _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(survivor_size);
1136   if (UsePerfData) {
1137     _policy_counters->tenuring_threshold()->set_value(_tenuring_threshold);
1138     _policy_counters->desired_survivor_size()->set_value(survivor_size * oopSize);
1139   }
1140   // The real maximum survivor size is bounded by the number of regions that can
1141   // be allocated into.


1336       if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
1337         _concurrent_start_to_mixed.record_concurrent_start_end(end);
1338       }
1339       break;
1340     case MixedGC:
1341       _concurrent_start_to_mixed.record_mixed_gc_start(start);
1342       break;
1343     default:
1344       ShouldNotReachHere();
1345   }
1346 }
1347 
1348 void G1Policy::abort_time_to_mixed_tracking() {
1349   _concurrent_start_to_mixed.reset();
1350 }
1351 
1352 bool G1Policy::next_gc_should_be_mixed(const char* true_action_str,
1353                                        const char* false_action_str) const {
1354   G1CollectionSetCandidates* candidates = _collection_set->candidates();
1355 
1356   if (candidates == NULL || candidates->is_empty()) {
1357     if (false_action_str != NULL) {
1358       log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str);
1359     }
1360     return false;
1361   }
1362 
1363   // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
1364   size_t reclaimable_bytes = candidates->remaining_reclaimable_bytes();
1365   double reclaimable_percent = reclaimable_bytes_percent(reclaimable_bytes);
1366   double threshold = (double) G1HeapWastePercent;
1367   if (reclaimable_percent <= threshold) {
1368     if (false_action_str != NULL) {
1369       log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
1370                           false_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
1371     }
1372     return false;
1373   }
1374   if (true_action_str != NULL) {
1375     log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
1376                         true_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
1377   }
1378   return true;
1379 }
1380 
1381 uint G1Policy::calc_min_old_cset_length() const {
1382   // The min old CSet region bound is based on the maximum desired
1383   // number of mixed GCs after a cycle. I.e., even if some old regions
1384   // look expensive, we should add them to the CSet anyway to make
1385   // sure we go through the available old regions in no more than the
1386   // maximum desired number of mixed GCs.
1387   //
1388   // The calculation is based on the number of marked regions we added
1389   // to the CSet candidates in the first place, not how many remain, so
1390   // that the result is the same during all mixed GCs that follow a cycle.
1391 
1392   const size_t region_num = _collection_set->candidates()->num_regions();
1393   const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
1394   size_t result = region_num / gc_num;
1395   // emulate ceiling
1396   if (result * gc_num < region_num) {
1397     result += 1;


< prev index next >