
src/share/vm/gc/g1/g1CollectorPolicy.cpp

rev 10472 : 8151711: Move G1 number sequences out of the G1 collector policy
Reviewed-by:
rev 10473 : [mq]: rename-to-analytics


   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/g1/concurrentG1Refine.hpp"
  27 #include "gc/g1/concurrentMarkThread.inline.hpp"

  28 #include "gc/g1/g1CollectedHeap.inline.hpp"
  29 #include "gc/g1/g1CollectionSet.hpp"
  30 #include "gc/g1/g1CollectorPolicy.hpp"
  31 #include "gc/g1/g1ConcurrentMark.hpp"
  32 #include "gc/g1/g1IHOPControl.hpp"
  33 #include "gc/g1/g1GCPhaseTimes.hpp"
  34 #include "gc/g1/g1YoungGenSizer.hpp"
  35 #include "gc/g1/heapRegion.inline.hpp"
  36 #include "gc/g1/heapRegionRemSet.hpp"
  37 #include "gc/shared/gcPolicyCounters.hpp"
  38 #include "runtime/arguments.hpp"
  39 #include "runtime/java.hpp"
  40 #include "runtime/mutexLocker.hpp"
  41 #include "utilities/debug.hpp"
  42 #include "utilities/pair.hpp"
  43 
  44 // Different defaults for different numbers of GC threads
  45 // They were chosen by running GCOld and SPECjbb on debris with different
  46 //   numbers of GC threads and choosing them based on the results
  47 
  48 // all the same
  49 static double rs_length_diff_defaults[] = {
  50   0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
  51 };
  52 
  53 static double cost_per_card_ms_defaults[] = {
  54   0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
  55 };
  56 
  57 // all the same
  58 static double young_cards_per_entry_ratio_defaults[] = {
  59   1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
  60 };
  61 
  62 static double cost_per_entry_ms_defaults[] = {
  63   0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
  64 };
  65 
  66 static double cost_per_byte_ms_defaults[] = {
  67   0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
  68 };
  69 
  70 // these should be pretty consistent
  71 static double constant_other_time_ms_defaults[] = {
  72   5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
  73 };
  74 
  75 
  76 static double young_other_cost_per_region_ms_defaults[] = {
  77   0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
  78 };
  79 
  80 static double non_young_other_cost_per_region_ms_defaults[] = {
  81   1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
  82 };
  83 
  84 G1CollectorPolicy::G1CollectorPolicy() :
  85   _predictor(G1ConfidencePercent / 100.0),
  86 
  87   _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  88 
  89   _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  90   _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  91 
  92   _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  93   _prev_collection_pause_end_ms(0.0),
  94   _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  95   _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  96   _cost_scan_hcc_seq(new TruncatedSeq(TruncatedSeqLength)),
  97   _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  98   _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  99   _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 100   _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 101   _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 102   _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
 103   _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 104   _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 105   _non_young_other_cost_per_region_ms_seq(
 106                                          new TruncatedSeq(TruncatedSeqLength)),
 107 
 108   _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
 109   _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
 110 
 111   _pause_time_target_ms((double) MaxGCPauseMillis),
 112 
 113   _recent_prev_end_times_for_all_gcs_sec(
 114                                 new TruncatedSeq(NumPrevPausesForHeuristics)),
 115 
 116   _recent_avg_pause_time_ratio(0.0),
 117   _rs_lengths_prediction(0),
 118   _max_survivor_regions(0),
 119 
 120   // add here any more surv rate groups
 121   _survivors_age_table(true),
 122 
 123   _gc_overhead_perc(0.0),
 124 
 125   _bytes_allocated_in_old_since_last_gc(0),
 126   _ihop_control(NULL),
 127   _initial_mark_to_mixed() {
 128 
 129   // SurvRateGroups below must be initialized after the predictor because they
 130   // indirectly use it through this object passed to their constructor.
 131   _short_lived_surv_rate_group =
 132     new SurvRateGroup(&_predictor, "Short Lived", G1YoungSurvRateNumRegionsSummary);
 133   _survivor_surv_rate_group =
 134     new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary);
 135 
 136   // Set up the region size and associated fields. Given that the
 137   // policy is created before the heap, we have to set this up here,
 138   // so it's done as soon as possible.
 139 
 140   // It would have been natural to pass initial_heap_byte_size() and
 141   // max_heap_byte_size() to setup_heap_region_size() but those have
 142   // not been set up at this point since they should be aligned with
 143   // the region size. So, there is a circular dependency here. We base
 144   // the region size on the heap size, but the heap size should be
 145   // aligned with the region size. To get around this we use the
 146   // unaligned values for the heap.
 147   HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
 148   HeapRegionRemSet::setup_remset_size();
 149 
 150   _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
 151   _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
 152   clear_ratio_check_data();
 153 
 154   _phase_times = new G1GCPhaseTimes(ParallelGCThreads);
 155 
 156   int index = MIN2(ParallelGCThreads - 1, 7u);
 157 
 158   _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
 159   _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
 160   _cost_scan_hcc_seq->add(0.0);
 161   _young_cards_per_entry_ratio_seq->add(
 162                                   young_cards_per_entry_ratio_defaults[index]);
 163   _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
 164   _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
 165   _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
 166   _young_other_cost_per_region_ms_seq->add(
 167                                young_other_cost_per_region_ms_defaults[index]);
 168   _non_young_other_cost_per_region_ms_seq->add(
 169                            non_young_other_cost_per_region_ms_defaults[index]);
 170 
 171   // Below, we might need to calculate the pause time target based on
 172   // the pause interval. When we do so we are going to give G1 maximum
 173   // flexibility and allow it to do pauses when it needs to. So, we'll
  174   // arrange for the pause interval to be the pause time target + 1 to
 175   // ensure that a) the pause time target is maximized with respect to
 176   // the pause interval and b) we maintain the invariant that pause
 177   // time target < pause interval. If the user does not want this
 178   // maximum flexibility, they will have to set the pause interval
 179   // explicitly.
 180 
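  // Worked example, assuming the G1 default MaxGCPauseMillis of 200: if
  // GCPauseIntervalMillis is left at its default, the code below sets it to
  // 200 + 1 = 201 ms, the smallest interval that still satisfies the
  // invariant pause time target < pause interval.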
 181   // First make sure that, if either parameter is set, its value is
 182   // reasonable.
 183   if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
 184     if (MaxGCPauseMillis < 1) {
 185       vm_exit_during_initialization("MaxGCPauseMillis should be "
 186                                     "greater than 0");
 187     }
 188   }
 189   if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
 190     if (GCPauseIntervalMillis < 1) {


 211   // the pause time target (this will also deal with the case when the
 212   // pause time target is the default value).
 213   if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
 214     FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
 215   }
 216 
 217   // Finally, make sure that the two parameters are consistent.
 218   if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
 219     char buffer[256];
 220     jio_snprintf(buffer, 256,
 221                  "MaxGCPauseMillis (%u) should be less than "
 222                  "GCPauseIntervalMillis (%u)",
 223                  MaxGCPauseMillis, GCPauseIntervalMillis);
 224     vm_exit_during_initialization(buffer);
 225   }
 226 
 227   double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
 228   double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
 229   _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
 230 
 231   // start conservatively (around 50ms is about right)
 232   _concurrent_mark_remark_times_ms->add(0.05);
 233   _concurrent_mark_cleanup_times_ms->add(0.20);
 234   _tenuring_threshold = MaxTenuringThreshold;
 235 
 236   assert(GCTimeRatio > 0,
 237          "we should have set it to a default value set_g1_gc_flags() "
 238          "if a user set it to 0");
 239   _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
 240 
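  // Worked example: with, say, GCTimeRatio = 9 (the kind of value the G1
  // ergonomics in set_g1_gc_flags() choose), the target GC overhead becomes
  // 100.0 * (1.0 / (1.0 + 9)) = 10%, i.e. GC time should stay below roughly
  // one tenth of elapsed time before the expansion heuristics react.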
 241   uintx reserve_perc = G1ReservePercent;
 242   // Put an artificial ceiling on this so that it's not set to a silly value.
 243   if (reserve_perc > 50) {
 244     reserve_perc = 50;
 245     warning("G1ReservePercent is set to a value that is too large, "
 246             "it's been updated to " UINTX_FORMAT, reserve_perc);
 247   }
 248   _reserve_factor = (double) reserve_perc / 100.0;
 249   // This will be set when the heap is expanded
 250   // for the first time during initialization.
 251   _reserve_regions = 0;
 252 
 253   _ihop_control = create_ihop_control();
 254 }
 255 
 256 G1CollectorPolicy::~G1CollectorPolicy() {
 257   delete _ihop_control;
 258 }
 259 
 260 double G1CollectorPolicy::get_new_prediction(TruncatedSeq const* seq) const {
 261   return _predictor.get_new_prediction(seq);
 262 }
 263 
 264 size_t G1CollectorPolicy::get_new_size_prediction(TruncatedSeq const* seq) const {
 265   return (size_t)get_new_prediction(seq);
 266 }
 267 
 268 void G1CollectorPolicy::initialize_alignments() {
 269   _space_alignment = HeapRegion::GrainBytes;
 270   size_t card_table_alignment = CardTableRS::ct_max_alignment_constraint();
 271   size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 272   _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size);
 273 }
 274 
 275 G1CollectorState* G1CollectorPolicy::collector_state() const { return _g1->collector_state(); }
 276 
 277 void G1CollectorPolicy::post_heap_initialize() {
 278   uintx max_regions = G1CollectedHeap::heap()->max_regions();
 279   size_t max_young_size = (size_t)_young_gen_sizer->max_young_length(max_regions) * HeapRegion::GrainBytes;
 280   if (max_young_size != MaxNewSize) {
 281     FLAG_SET_ERGO(size_t, MaxNewSize, max_young_size);
 282   }
 283 }
 284 
 285 void G1CollectorPolicy::initialize_flags() {
 286   if (G1HeapRegionSize != HeapRegion::GrainBytes) {
 287     FLAG_SET_ERGO(size_t, G1HeapRegionSize, HeapRegion::GrainBytes);


 322   phase_times()->note_gc_start(num_active_workers);
 323 }
 324 
 325 // Create the jstat counters for the policy.
 326 void G1CollectorPolicy::initialize_gc_policy_counters() {
 327   _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
 328 }
 329 
 330 bool G1CollectorPolicy::predict_will_fit(uint young_length,
 331                                          double base_time_ms,
 332                                          uint base_free_regions,
 333                                          double target_pause_time_ms) const {
 334   if (young_length >= base_free_regions) {
 335     // end condition 1: not enough space for the young regions
 336     return false;
 337   }
 338 
 339   double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
 340   size_t bytes_to_copy =
 341                (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
 342   double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
 343   double young_other_time_ms = predict_young_other_time_ms(young_length);

 344   double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
 345   if (pause_time_ms > target_pause_time_ms) {
 346     // end condition 2: prediction is over the target pause time
 347     return false;
 348   }
 349 
 350   size_t free_bytes = (base_free_regions - young_length) * HeapRegion::GrainBytes;
 351 
 352   // When copying, we will likely need more bytes free than is live in the region.
 353   // Add some safety margin to factor in the confidence of our guess, and the
 354   // natural expected waste.
 355   // (100.0 / G1ConfidencePercent) is a scale factor that expresses the uncertainty
 356   // of the calculation: the lower the confidence, the more headroom.
 357   // (100 + TargetPLABWastePct) represents the increase in expected bytes during
 358   // copying due to anticipated waste in the PLABs.
 359   double safety_factor = (100.0 / G1ConfidencePercent) * (100 + TargetPLABWastePct) / 100.0;
 360   size_t expected_bytes_to_copy = (size_t)(safety_factor * bytes_to_copy);
 361 
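  // Worked example, assuming G1ConfidencePercent = 50 and
  // TargetPLABWastePct = 10 (their usual defaults):
  //   safety_factor = (100.0 / 50) * (100 + 10) / 100.0 = 2.2
  // so a predicted 10 MB of live data needs about 22 MB of free space in the
  // remaining regions before this young length is considered to fit.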
 362   if (expected_bytes_to_copy > free_bytes) {
 363     // end condition 3: out-of-space


 367   // success!
 368   return true;
 369 }
 370 
 371 void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) {
 372   // re-calculate the necessary reserve
 373   double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
 374   // We use ceiling so that if reserve_regions_d is > 0.0 (but
 375   // smaller than 1.0) we'll get 1.
 376   _reserve_regions = (uint) ceil(reserve_regions_d);
 377 
 378   _young_gen_sizer->heap_size_changed(new_number_of_regions);
 379 
 380   _ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes);
 381 }
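// Worked example for the reserve calculation above, assuming
// G1ReservePercent = 10 (so _reserve_factor = 0.1): with 2043 regions,
// reserve_regions_d = 204.3 and the ceiling gives _reserve_regions = 205.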
 382 
 383 uint G1CollectorPolicy::calculate_young_list_desired_min_length(
 384                                                        uint base_min_length) const {
 385   uint desired_min_length = 0;
 386   if (adaptive_young_list_length()) {
 387     if (_alloc_rate_ms_seq->num() > 3) {
 388       double now_sec = os::elapsedTime();
 389       double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
 390       double alloc_rate_ms = predict_alloc_rate_ms();
 391       desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
 392     } else {
 393       // otherwise we don't have enough info to make the prediction
 394     }
 395   }
 396   desired_min_length += base_min_length;
 397   // make sure we don't go below any user-defined minimum bound
 398   return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
 399 }
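// Worked example for the adaptive minimum above: if the predicted allocation
// rate is 0.5 eden regions/ms and the MMU tracker says the next GC may run in
// 40 ms at the earliest, desired_min_length = ceil(0.5 * 40) = 20 regions,
// which is then added to base_min_length and clamped to the user minimum.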
 400 
 401 uint G1CollectorPolicy::calculate_young_list_desired_max_length() const {
 402   // Here, we might want to also take into account any additional
 403   // constraints (i.e., user-defined minimum bound). Currently, we
 404   // effectively don't set this bound.
 405   return _young_gen_sizer->max_desired_young_length();
 406 }
 407 
 408 uint G1CollectorPolicy::update_young_list_max_and_target_length() {
 409   return update_young_list_max_and_target_length(predict_rs_lengths());
 410 }
 411 
 412 uint G1CollectorPolicy::update_young_list_max_and_target_length(size_t rs_lengths) {
 413   uint unbounded_target_length = update_young_list_target_length(rs_lengths);
 414   update_max_gc_locker_expansion();
 415   return unbounded_target_length;
 416 }
 417 
 418 uint G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
 419   YoungTargetLengths young_lengths = young_list_target_lengths(rs_lengths);
 420   _young_list_target_length = young_lengths.first;
 421   return young_lengths.second;
 422 }
 423 
 424 G1CollectorPolicy::YoungTargetLengths G1CollectorPolicy::young_list_target_lengths(size_t rs_lengths) const {
 425   YoungTargetLengths result;
 426 
 427   // Calculate the absolute and desired min bounds first.
 428 
 429   // This is how many young regions we already have (currently: the survivors).


 494   assert(adaptive_young_list_length(), "pre-condition");
 495   assert(collector_state()->gcs_are_young(), "only call this for young GCs");
 496 
 497   // In case some edge-condition makes the desired max length too small...
 498   if (desired_max_length <= desired_min_length) {
 499     return desired_min_length;
 500   }
 501 
 502   // We'll adjust min_young_length and max_young_length not to include
 503   // the already allocated young regions (i.e., so they reflect the
 504   // min and max eden regions we'll allocate). The base_min_length
 505   // will be reflected in the predictions by the
 506   // survivor_regions_evac_time prediction.
 507   assert(desired_min_length > base_min_length, "invariant");
 508   uint min_young_length = desired_min_length - base_min_length;
 509   assert(desired_max_length > base_min_length, "invariant");
 510   uint max_young_length = desired_max_length - base_min_length;
 511 
 512   double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
 513   double survivor_regions_evac_time = predict_survivor_regions_evac_time();
 514   size_t pending_cards = get_new_size_prediction(_pending_cards_seq);
 515   size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
 516   size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
 517   double base_time_ms =
 518     predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
 519     survivor_regions_evac_time;
 520   uint available_free_regions = _free_regions_at_end_of_collection;
 521   uint base_free_regions = 0;
 522   if (available_free_regions > _reserve_regions) {
 523     base_free_regions = available_free_regions - _reserve_regions;
 524   }
 525 
 526   // Here, we will make sure that the shortest young length that
 527   // makes sense fits within the target pause time.
 528 
 529   if (predict_will_fit(min_young_length, base_time_ms,
 530                        base_free_regions, target_pause_time_ms)) {
 531     // The shortest young length will fit into the target pause time;
 532     // we'll now check whether the absolute maximum number of young
 533     // regions will fit in the target pause time. If not, we'll do
 534     // a binary search between min_young_length and max_young_length.
 535     if (predict_will_fit(max_young_length, base_time_ms,
 536                          base_free_regions, target_pause_time_ms)) {


 596        r != NULL && r != _g1->young_list()->last_survivor_region()->get_next_young_region();
 597        r = r->get_next_young_region()) {
 598     survivor_regions_evac_time += predict_region_elapsed_time_ms(r, collector_state()->gcs_are_young());
 599   }
 600   return survivor_regions_evac_time;
 601 }
 602 
 603 void G1CollectorPolicy::revise_young_list_target_length_if_necessary(size_t rs_lengths) {
 604   guarantee( adaptive_young_list_length(), "should not call this otherwise" );
 605 
 606   if (rs_lengths > _rs_lengths_prediction) {
 607     // add 10% to avoid having to recalculate often
 608     size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
 609     update_rs_lengths_prediction(rs_lengths_prediction);
 610 
 611     update_young_list_max_and_target_length(rs_lengths_prediction);
 612   }
 613 }
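// Worked example of the 10% padding above: if the observed rs_lengths is
// 10000 and exceeds the current prediction, the new prediction becomes
// 10000 * 1100 / 1000 = 11000, so small further increases do not trigger
// another recalculation of the young list target.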
 614 
 615 void G1CollectorPolicy::update_rs_lengths_prediction() {
 616   update_rs_lengths_prediction(predict_rs_lengths());
 617 }
 618 
 619 void G1CollectorPolicy::update_rs_lengths_prediction(size_t prediction) {
 620   if (collector_state()->gcs_are_young() && adaptive_young_list_length()) {
 621     _rs_lengths_prediction = prediction;
 622   }
 623 }
 624 
 625 #ifndef PRODUCT
 626 bool G1CollectorPolicy::verify_young_ages() {
 627   HeapRegion* head = _g1->young_list()->first_region();
 628   return
 629     verify_young_ages(head, _short_lived_surv_rate_group);
 630   // also call verify_young_ages on any additional surv rate groups
 631 }
 632 
 633 bool
 634 G1CollectorPolicy::verify_young_ages(HeapRegion* head,
 635                                      SurvRateGroup *surv_rate_group) {
 636   guarantee( surv_rate_group != NULL, "pre-condition" );


 664     }
 665   }
 666 
 667   return ret;
 668 }
 669 #endif // PRODUCT
 670 
 671 void G1CollectorPolicy::record_full_collection_start() {
 672   _full_collection_start_sec = os::elapsedTime();
 673   // Release the future to-space so that it is available for compaction into.
 674   collector_state()->set_full_collection(true);
 675 }
 676 
 677 void G1CollectorPolicy::record_full_collection_end() {
 678   // Consider this like a collection pause for the purposes of allocation
 679   // since last pause.
 680   double end_sec = os::elapsedTime();
 681   double full_gc_time_sec = end_sec - _full_collection_start_sec;
 682   double full_gc_time_ms = full_gc_time_sec * 1000.0;
 683 
 684   update_recent_gc_times(end_sec, full_gc_time_ms);
 685 
 686   collector_state()->set_full_collection(false);
 687 
 688   // "Nuke" the heuristics that control the young/mixed GC
 689   // transitions and make sure we start with young GCs after the Full GC.
 690   collector_state()->set_gcs_are_young(true);
 691   collector_state()->set_last_young_gc(false);
 692   collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
 693   collector_state()->set_during_initial_mark_pause(false);
 694   collector_state()->set_in_marking_window(false);
 695   collector_state()->set_in_marking_window_im(false);
 696 
 697   _short_lived_surv_rate_group->start_adding_regions();
 698   // also call this on any additional surv rate groups
 699 
 700   _free_regions_at_end_of_collection = _g1->num_free_regions();
 701   // Reset survivors SurvRateGroup.
 702   _survivor_surv_rate_group->reset();
 703   update_young_list_max_and_target_length();
 704   update_rs_lengths_prediction();


 732   _survivors_age_table.clear();
 733 
 734   assert( verify_young_ages(), "region age verification" );
 735 }
 736 
 737 void G1CollectorPolicy::record_concurrent_mark_init_end(double
 738                                                    mark_init_elapsed_time_ms) {
 739   collector_state()->set_during_marking(true);
 740   assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
 741   collector_state()->set_during_initial_mark_pause(false);
 742 }
 743 
 744 void G1CollectorPolicy::record_concurrent_mark_remark_start() {
 745   _mark_remark_start_sec = os::elapsedTime();
 746   collector_state()->set_during_marking(false);
 747 }
 748 
 749 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
 750   double end_time_sec = os::elapsedTime();
 751   double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
 752   _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
 753   _prev_collection_pause_end_ms += elapsed_time_ms;
 754 
 755   record_pause(Remark, _mark_remark_start_sec, end_time_sec);
 756 }
 757 
 758 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
 759   _mark_cleanup_start_sec = os::elapsedTime();
 760 }
 761 
 762 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
 763   bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc",
 764                                                               "skip last young-only gc");
 765   collector_state()->set_last_young_gc(should_continue_with_reclaim);
 766   // We skip the marking phase.
 767   if (!should_continue_with_reclaim) {
 768     abort_time_to_mixed_tracking();
 769   }
 770   collector_state()->set_in_marking_window(false);
 771 }
 772 
 773 double G1CollectorPolicy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {


 832 
 833 void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) {
 834   double end_time_sec = os::elapsedTime();
 835 
 836   size_t cur_used_bytes = _g1->used();
 837   assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
 838   bool last_pause_included_initial_mark = false;
 839   bool update_stats = !_g1->evacuation_failed();
 840 
 841   NOT_PRODUCT(_short_lived_surv_rate_group->print());
 842 
 843   record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);
 844 
 845   last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
 846   if (last_pause_included_initial_mark) {
 847     record_concurrent_mark_init_end(0.0);
 848   } else {
 849     maybe_start_marking();
 850   }
 851 
 852   double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
 853   if (app_time_ms < MIN_TIMER_GRANULARITY) {
 854     // This usually happens due to the timer not having the required
 855     // granularity. Some Linuxes are the usual culprits.
 856     // We'll just set it to something (arbitrarily) small.
 857     app_time_ms = 1.0;
 858   }
 859 
 860   if (update_stats) {
 861     // We maintain the invariant that all objects allocated by mutator
 862     // threads will be allocated out of eden regions. So, we can use
 863     // the eden region number allocated since the previous GC to
 864     // calculate the application's allocate rate. The only exception
 865     // to that is humongous objects that are allocated separately. But
 866     // given that humongous object allocations do not really affect
 867     // either the pause's duration nor when the next pause will take
 868     // place we can safely ignore them here.
 869     uint regions_allocated = _collection_set->eden_region_length();
 870     double alloc_rate_ms = (double) regions_allocated / app_time_ms;
 871     _alloc_rate_ms_seq->add(alloc_rate_ms);
 872 
 873     double interval_ms =
 874       (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
 875     update_recent_gc_times(end_time_sec, pause_time_ms);
 876     _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
 877     if (recent_avg_pause_time_ratio() < 0.0 ||
 878         (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
 879       // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
 880       // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
 881       if (_recent_avg_pause_time_ratio < 0.0) {
 882         _recent_avg_pause_time_ratio = 0.0;
 883       } else {
 884         assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
 885         _recent_avg_pause_time_ratio = 1.0;
 886       }
 887     }
 888 
 889     // Compute the ratio of just this last pause time to the entire time range stored
 890     // in the vectors. Comparing this pause to the entire range, rather than only the
 891     // most recent interval, has the effect of smoothing over a possible transient 'burst'
 892     // of more frequent pauses that don't really reflect a change in heap occupancy.
 893     // This reduces the likelihood of a needless heap expansion being triggered.
 894     _last_pause_time_ratio =
 895       (pause_time_ms * _recent_prev_end_times_for_all_gcs_sec->num()) / interval_ms;
 896   }
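    // Worked example for the two ratios above, assuming the recent-pause
    // sequences hold their last 10 entries: if those pauses sum to 240 ms
    // over an interval of 3000 ms, _recent_avg_pause_time_ratio = 0.08, and a
    // single 30 ms pause gives _last_pause_time_ratio = 30 * 10 / 3000 = 0.1.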
 897 
 898   bool new_in_marking_window = collector_state()->in_marking_window();
 899   bool new_in_marking_window_im = false;
 900   if (last_pause_included_initial_mark) {
 901     new_in_marking_window = true;
 902     new_in_marking_window_im = true;
 903   }
 904 
 905   if (collector_state()->last_young_gc()) {
  906     // This is supposed to be the "last young GC" before we start
 907     // doing mixed GCs. Here we decide whether to start mixed GCs or not.
 908     assert(!last_pause_included_initial_mark, "The last young GC is not allowed to be an initial mark GC");
 909 
 910     if (next_gc_should_be_mixed("start mixed GCs",
 911                                 "do not start mixed GCs")) {
 912       collector_state()->set_gcs_are_young(false);
 913     } else {
 914       // We aborted the mixed GC phase early.
 915       abort_time_to_mixed_tracking();


 921   if (!collector_state()->last_gc_was_young()) {
 922     // This is a mixed GC. Here we decide whether to continue doing
 923     // mixed GCs or not.
 924     if (!next_gc_should_be_mixed("continue mixed GCs",
 925                                  "do not continue mixed GCs")) {
 926       collector_state()->set_gcs_are_young(true);
 927 
 928       maybe_start_marking();
 929     }
 930   }
 931 
 932   _short_lived_surv_rate_group->start_adding_regions();
 933   // Do that for any other surv rate groups
 934 
 935   double scan_hcc_time_ms = ConcurrentG1Refine::hot_card_cache_enabled() ? average_time_ms(G1GCPhaseTimes::ScanHCC) : 0.0;
 936 
 937   if (update_stats) {
 938     double cost_per_card_ms = 0.0;
 939     if (_pending_cards > 0) {
 940       cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms) / (double) _pending_cards;
 941       _cost_per_card_ms_seq->add(cost_per_card_ms);
 942     }
 943     _cost_scan_hcc_seq->add(scan_hcc_time_ms);
 944 
 945     double cost_per_entry_ms = 0.0;
 946     if (cards_scanned > 10) {
 947       cost_per_entry_ms = average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned;
 948       if (collector_state()->last_gc_was_young()) {
 949         _cost_per_entry_ms_seq->add(cost_per_entry_ms);
 950       } else {
 951         _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
 952       }
 953     }
 954 
 955     if (_max_rs_lengths > 0) {
 956       double cards_per_entry_ratio =
 957         (double) cards_scanned / (double) _max_rs_lengths;
 958       if (collector_state()->last_gc_was_young()) {
 959         _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
 960       } else {
 961         _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
 962       }
 963     }
 964 
 965     // This is defensive. For a while _max_rs_lengths could get
 966     // smaller than _recorded_rs_lengths which was causing
 967     // rs_length_diff to get very large and mess up the RSet length
 968     // predictions. The reason was unsafe concurrent updates to the
 969     // _inc_cset_recorded_rs_lengths field which the code below guards
 970     // against (see CR 7118202). This bug has now been fixed (see CR
 971     // 7119027). However, I'm still worried that
 972     // _inc_cset_recorded_rs_lengths might still end up somewhat
 973     // inaccurate. The concurrent refinement thread calculates an
 974     // RSet's length concurrently with other CR threads updating it
 975     // which might cause it to calculate the length incorrectly (if,
 976     // say, it's in mid-coarsening). So I'll leave in the defensive
 977     // conditional below just in case.
 978     size_t rs_length_diff = 0;
 979     size_t recorded_rs_lengths = _collection_set->recorded_rs_lengths();
 980     if (_max_rs_lengths > recorded_rs_lengths) {
 981       rs_length_diff = _max_rs_lengths - recorded_rs_lengths;
 982     }
 983     _rs_length_diff_seq->add((double) rs_length_diff);
 984 
 985     size_t freed_bytes = heap_used_bytes_before_gc - cur_used_bytes;
 986     size_t copied_bytes = _collection_set->bytes_used_before() - freed_bytes;
 987     double cost_per_byte_ms = 0.0;
 988 
 989     if (copied_bytes > 0) {
 990       cost_per_byte_ms = average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
 991       if (collector_state()->in_marking_window()) {
 992         _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
 993       } else {
 994         _cost_per_byte_ms_seq->add(cost_per_byte_ms);
 995       }
 996     }
 997 
 998     if (_collection_set->young_region_length() > 0) {
 999       _young_other_cost_per_region_ms_seq->add(young_other_time_ms() /
1000                                                _collection_set->young_region_length());
1001     }
1002 
1003     if (_collection_set->old_region_length() > 0) {
1004       _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms() /
1005                                                    _collection_set->old_region_length());
1006     }
1007 
1008     _constant_other_time_ms_seq->add(constant_other_time_ms(pause_time_ms));
1009 
1010     _pending_cards_seq->add((double) _pending_cards);
1011     _rs_lengths_seq->add((double) _max_rs_lengths);
1012   }
1013 
1014   collector_state()->set_in_marking_window(new_in_marking_window);
1015   collector_state()->set_in_marking_window_im(new_in_marking_window_im);
1016   _free_regions_at_end_of_collection = _g1->num_free_regions();
1017   // IHOP control wants to know the expected young gen length if it were not
1018   // restrained by the heap reserve. Using the actual length would make the
 1019   // prediction too small and thus limit the young gen every time we get to the
1020   // predicted target occupancy.
1021   size_t last_unrestrained_young_length = update_young_list_max_and_target_length();
1022   update_rs_lengths_prediction();
1023 
1024   update_ihop_prediction(app_time_ms / 1000.0,
1025                          _bytes_allocated_in_old_since_last_gc,
1026                          last_unrestrained_young_length * HeapRegion::GrainBytes);
1027   _bytes_allocated_in_old_since_last_gc = 0;
1028 
1029   _ihop_control->send_trace_event(_g1->gc_tracer_stw());
1030 
1031   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.


1128     cg1r->set_red_zone(g * k_gr);
1129     cg1r->reinitialize_threads();
1130 
1131     size_t processing_threshold_delta = MAX2<size_t>(cg1r->green_zone() * _predictor.sigma(), 1);
1132     size_t processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
1133                                     cg1r->yellow_zone());
1134     // Change the barrier params
1135     dcqs.set_process_completed_threshold((int)processing_threshold);
1136     dcqs.set_max_completed_queue((int)cg1r->red_zone());
1137   }
1138 
1139   size_t curr_queue_size = dcqs.completed_buffers_num();
1140   if (curr_queue_size >= cg1r->yellow_zone()) {
1141     dcqs.set_completed_queue_padding(curr_queue_size);
1142   } else {
1143     dcqs.set_completed_queue_padding(0);
1144   }
1145   dcqs.notify_if_necessary();
1146 }
1147 
1148 size_t G1CollectorPolicy::predict_rs_lengths() const {
1149   return get_new_size_prediction(_rs_lengths_seq);
1150 }
1151 
1152 size_t G1CollectorPolicy::predict_rs_length_diff() const {
1153   return get_new_size_prediction(_rs_length_diff_seq);
1154 }
1155 
1156 double G1CollectorPolicy::predict_alloc_rate_ms() const {
1157   return get_new_prediction(_alloc_rate_ms_seq);
1158 }
1159 
1160 double G1CollectorPolicy::predict_cost_per_card_ms() const {
1161   return get_new_prediction(_cost_per_card_ms_seq);
1162 }
1163 
1164 double G1CollectorPolicy::predict_scan_hcc_ms() const {
1165   return get_new_prediction(_cost_scan_hcc_seq);
1166 }
1167 
1168 double G1CollectorPolicy::predict_rs_update_time_ms(size_t pending_cards) const {
1169   return pending_cards * predict_cost_per_card_ms() + predict_scan_hcc_ms();
1170 }
1171 
1172 double G1CollectorPolicy::predict_young_cards_per_entry_ratio() const {
1173   return get_new_prediction(_young_cards_per_entry_ratio_seq);
1174 }
1175 
1176 double G1CollectorPolicy::predict_mixed_cards_per_entry_ratio() const {
1177   if (_mixed_cards_per_entry_ratio_seq->num() < 2) {
1178     return predict_young_cards_per_entry_ratio();
1179   } else {
1180     return get_new_prediction(_mixed_cards_per_entry_ratio_seq);
1181   }
1182 }
1183 
1184 size_t G1CollectorPolicy::predict_young_card_num(size_t rs_length) const {
1185   return (size_t) (rs_length * predict_young_cards_per_entry_ratio());
1186 }
1187 
1188 size_t G1CollectorPolicy::predict_non_young_card_num(size_t rs_length) const {
1189   return (size_t)(rs_length * predict_mixed_cards_per_entry_ratio());
1190 }
1191 
1192 double G1CollectorPolicy::predict_rs_scan_time_ms(size_t card_num) const {
1193   if (collector_state()->gcs_are_young()) {
1194     return card_num * get_new_prediction(_cost_per_entry_ms_seq);
1195   } else {
1196     return predict_mixed_rs_scan_time_ms(card_num);
1197   }
1198 }
1199 
1200 double G1CollectorPolicy::predict_mixed_rs_scan_time_ms(size_t card_num) const {
1201   if (_mixed_cost_per_entry_ms_seq->num() < 3) {
1202     return card_num * get_new_prediction(_cost_per_entry_ms_seq);
1203   } else {
1204     return card_num * get_new_prediction(_mixed_cost_per_entry_ms_seq);
1205   }
1206 }
1207 
1208 double G1CollectorPolicy::predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const {
1209   if (_cost_per_byte_ms_during_cm_seq->num() < 3) {
1210     return (1.1 * bytes_to_copy) * get_new_prediction(_cost_per_byte_ms_seq);
1211   } else {
1212     return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_during_cm_seq);
1213   }
1214 }
1215 
1216 double G1CollectorPolicy::predict_object_copy_time_ms(size_t bytes_to_copy) const {
1217   if (collector_state()->during_concurrent_mark()) {
1218     return predict_object_copy_time_ms_during_cm(bytes_to_copy);
1219   } else {
1220     return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_seq);
1221   }
1222 }
1223 
1224 double G1CollectorPolicy::predict_constant_other_time_ms() const {
1225   return get_new_prediction(_constant_other_time_ms_seq);
1226 }
1227 
1228 double G1CollectorPolicy::predict_young_other_time_ms(size_t young_num) const {
1229   return young_num * get_new_prediction(_young_other_cost_per_region_ms_seq);
1230 }
1231 
1232 double G1CollectorPolicy::predict_non_young_other_time_ms(size_t non_young_num) const {
1233   return non_young_num * get_new_prediction(_non_young_other_cost_per_region_ms_seq);
1234 }
1235 
1236 double G1CollectorPolicy::predict_remark_time_ms() const {
1237   return get_new_prediction(_concurrent_mark_remark_times_ms);
1238 }
1239 
1240 double G1CollectorPolicy::predict_cleanup_time_ms() const {
1241   return get_new_prediction(_concurrent_mark_cleanup_times_ms);
1242 }
1243 
1244 double G1CollectorPolicy::predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const {
1245   TruncatedSeq* seq = surv_rate_group->get_seq(age);
1246   guarantee(seq->num() > 0, "There should be some young gen survivor samples available. Tried to access with age %d", age);
1247   double pred = get_new_prediction(seq);
1248   if (pred > 1.0) {
1249     pred = 1.0;
1250   }
1251   return pred;
1252 }
1253 
1254 double G1CollectorPolicy::predict_yg_surv_rate(int age) const {
1255   return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
1256 }
1257 
1258 double G1CollectorPolicy::accum_yg_surv_rate_pred(int age) const {
1259   return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
1260 }
1261 
1262 double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
1263                                                        size_t scanned_cards) const {
1264   return
1265     predict_rs_update_time_ms(pending_cards) +
1266     predict_rs_scan_time_ms(scanned_cards) +
1267     predict_constant_other_time_ms();
1268 }
1269 
1270 double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) const {
1271   size_t rs_length = predict_rs_lengths() + predict_rs_length_diff();
1272   size_t card_num;
1273   if (collector_state()->gcs_are_young()) {
1274     card_num = predict_young_card_num(rs_length);
1275   } else {
1276     card_num = predict_non_young_card_num(rs_length);
1277   }
1278   return predict_base_elapsed_time_ms(pending_cards, card_num);
1279 }
1280 
1281 size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) const {
1282   size_t bytes_to_copy;
1283   if (hr->is_marked())
1284     bytes_to_copy = hr->max_live_bytes();
1285   else {
1286     assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
1287     int age = hr->age_in_surv_rate_group();
1288     double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
1289     bytes_to_copy = (size_t) (hr->used() * yg_surv_rate);
1290   }
1291   return bytes_to_copy;
1292 }
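// Worked example: for an unmarked young region with 4 MB used and a predicted
// survival rate of 0.25 for its age, predict_bytes_to_copy() returns about
// 1 MB; for a marked (old) region the maximum live bytes are used directly.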
1293 
1294 double G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
1295                                                          bool for_young_gc) const {
1296   size_t rs_length = hr->rem_set()->occupied();
1297   size_t card_num;
1298 
1299   // Predicting the number of cards is based on which type of GC
1300   // we're predicting for.
1301   if (for_young_gc) {
1302     card_num = predict_young_card_num(rs_length);
1303   } else {
1304     card_num = predict_non_young_card_num(rs_length);
1305   }
1306   size_t bytes_to_copy = predict_bytes_to_copy(hr);
1307 
1308   double region_elapsed_time_ms =
1309     predict_rs_scan_time_ms(card_num) +
1310     predict_object_copy_time_ms(bytes_to_copy);
1311 
1312   // The prediction of the "other" time for this region is based
1313   // upon the region type and NOT the GC type.
1314   if (hr->is_young()) {
1315     region_elapsed_time_ms += predict_young_other_time_ms(1);
1316   } else {
1317     region_elapsed_time_ms += predict_non_young_other_time_ms(1);
1318   }
1319   return region_elapsed_time_ms;
1320 }
1321 
1322 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
1323                                                double elapsed_ms) {
1324   _recent_gc_times_ms->add(elapsed_ms);
1325   _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
1326   _prev_collection_pause_end_ms = end_time_sec * 1000.0;
1327 }
1328 
1329 void G1CollectorPolicy::clear_ratio_check_data() {
1330   _ratio_over_threshold_count = 0;
1331   _ratio_over_threshold_sum = 0.0;
1332   _pauses_since_start = 0;
1333 }
1334 
1335 size_t G1CollectorPolicy::expansion_amount() {
1336   double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
1337   double last_gc_overhead = _last_pause_time_ratio * 100.0;
1338   double threshold = _gc_overhead_perc;
1339   size_t expand_bytes = 0;
1340 
1341   // If the heap is at less than half its maximum size, scale the threshold down,
1342   // to a limit of 1. Thus the smaller the heap is, the more likely it is to expand,
1343   // though the scaling code will likely keep the increase small.
1344   if (_g1->capacity() <= _g1->max_capacity() / 2) {
1345     threshold *= (double)_g1->capacity() / (double)(_g1->max_capacity() / 2);
1346     threshold = MAX2(threshold, 1.0);
1347   }
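  // Worked example of the scaling above: with _gc_overhead_perc = 10 and a
  // heap currently at one quarter of its maximum size, the threshold becomes
  // 10 * (capacity / (max_capacity / 2)) = 10 * 0.5 = 5, still above the
  // floor of 1.0, so expansion is considered once GC overhead exceeds 5%.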
1348 
1349   // If the last GC time ratio is over the threshold, increment the count of
1350   // times it has been exceeded, and add this ratio to the sum of exceeded
1351   // ratios.
1352   if (last_gc_overhead > threshold) {
1353     _ratio_over_threshold_count++;
1354     _ratio_over_threshold_sum += last_gc_overhead;
1355   }
1356 
1357   // Check if we've had enough GC time ratio checks that were over the


1602   const uint min_chunk_size = MAX2(n_regions / n_workers, 1U);
1603   return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size);
1604 }
1605 
1606 void G1CollectorPolicy::record_concurrent_mark_cleanup_end() {
1607   cset_chooser()->clear();
1608 
1609   WorkGang* workers = _g1->workers();
1610   uint n_workers = workers->active_workers();
1611 
1612   uint n_regions = _g1->num_regions();
1613   uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions);
1614   cset_chooser()->prepare_for_par_region_addition(n_workers, n_regions, chunk_size);
1615   ParKnownGarbageTask par_known_garbage_task(cset_chooser(), chunk_size, n_workers);
1616   workers->run_task(&par_known_garbage_task);
1617 
1618   cset_chooser()->sort_regions();
1619 
1620   double end_sec = os::elapsedTime();
1621   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
1622   _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
1623   _prev_collection_pause_end_ms += elapsed_time_ms;
1624 
1625   record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
1626 }
1627 
1628 double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) const {
1629   // Returns the given amount of reclaimable bytes (that represents
1630   // the amount of reclaimable space still to be collected) as a
1631   // percentage of the current heap capacity.
1632   size_t capacity_bytes = _g1->capacity();
1633   return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
1634 }
1635 
1636 void G1CollectorPolicy::maybe_start_marking() {
1637   if (need_to_start_conc_mark("end of GC")) {
1638     // Note: this might have already been set, if during the last
1639     // pause we decided to start a cycle but at the beginning of
1640     // this pause we decided to postpone it. That's OK.
1641     collector_state()->set_initiate_conc_mark_if_possible(true);
1642   }
1643 }


1741   // The max old CSet region bound is based on the threshold expressed
1742   // as a percentage of the heap size. I.e., it should bound the
1743   // number of old regions added to the CSet irrespective of how many
1744   // of them are available.
1745 
1746   const G1CollectedHeap* g1h = G1CollectedHeap::heap();
1747   const size_t region_num = g1h->num_regions();
1748   const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
1749   size_t result = region_num * perc / 100;
1750   // emulate ceiling
1751   if (100 * result < region_num * perc) {
1752     result += 1;
1753   }
1754   return (uint) result;
1755 }
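// Worked example of the ceiling emulation above, assuming
// G1OldCSetRegionThresholdPercent = 10 and 2048 regions: result = 204, and
// since 100 * 204 = 20400 < 2048 * 10 = 20480, it is bumped to 205 regions.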
1756 
1757 void G1CollectorPolicy::finalize_collection_set(double target_pause_time_ms) {
1758   double time_remaining_ms = _collection_set->finalize_young_part(target_pause_time_ms);
1759   _collection_set->finalize_old_part(time_remaining_ms);
1760 }
1761 


   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/g1/concurrentG1Refine.hpp"
  27 #include "gc/g1/concurrentMarkThread.inline.hpp"
  28 #include "gc/g1/g1Analytics.hpp"
  29 #include "gc/g1/g1CollectedHeap.inline.hpp"
  30 #include "gc/g1/g1CollectionSet.hpp"
  31 #include "gc/g1/g1CollectorPolicy.hpp"
  32 #include "gc/g1/g1ConcurrentMark.hpp"
  33 #include "gc/g1/g1IHOPControl.hpp"
  34 #include "gc/g1/g1GCPhaseTimes.hpp"
  35 #include "gc/g1/g1YoungGenSizer.hpp"
  36 #include "gc/g1/heapRegion.inline.hpp"
  37 #include "gc/g1/heapRegionRemSet.hpp"
  38 #include "gc/shared/gcPolicyCounters.hpp"
  39 #include "runtime/arguments.hpp"
  40 #include "runtime/java.hpp"
  41 #include "runtime/mutexLocker.hpp"
  42 #include "utilities/debug.hpp"
  43 #include "utilities/pair.hpp"
  44 








































  45 G1CollectorPolicy::G1CollectorPolicy() :
  46   _predictor(G1ConfidencePercent / 100.0),
  47   _analytics(new G1Analytics(&_predictor)),
























  48   _pause_time_target_ms((double) MaxGCPauseMillis),





  49   _rs_lengths_prediction(0),
  50   _max_survivor_regions(0),


  51   _survivors_age_table(true),

  52   _gc_overhead_perc(0.0),
  53 
  54   _bytes_allocated_in_old_since_last_gc(0),
  55   _ihop_control(NULL),
  56   _initial_mark_to_mixed() {
  57 
  58   // SurvRateGroups below must be initialized after the predictor because they
  59   // indirectly use it through this object passed to their constructor.
  60   _short_lived_surv_rate_group =
  61     new SurvRateGroup(&_predictor, "Short Lived", G1YoungSurvRateNumRegionsSummary);
  62   _survivor_surv_rate_group =
  63     new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary);
  64 
  65   // Set up the region size and associated fields. Given that the
  66   // policy is created before the heap, we have to set this up here,
  67   // so it's done as soon as possible.
  68 
  69   // It would have been natural to pass initial_heap_byte_size() and
  70   // max_heap_byte_size() to setup_heap_region_size() but those have
  71   // not been set up at this point since they should be aligned with
  72   // the region size. So, there is a circular dependency here. We base
  73   // the region size on the heap size, but the heap size should be
  74   // aligned with the region size. To get around this we use the
  75   // unaligned values for the heap.
  76   HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
  77   HeapRegionRemSet::setup_remset_size();
  78 


  79   clear_ratio_check_data();
  80 
  81   _phase_times = new G1GCPhaseTimes(ParallelGCThreads);
  82 















  83   // Below, we might need to calculate the pause time target based on
  84   // the pause interval. When we do so we are going to give G1 maximum
  85   // flexibility and allow it to do pauses when it needs to. So, we'll
   86   // arrange for the pause interval to be the pause time target + 1 to
  87   // ensure that a) the pause time target is maximized with respect to
  88   // the pause interval and b) we maintain the invariant that pause
  89   // time target < pause interval. If the user does not want this
  90   // maximum flexibility, they will have to set the pause interval
  91   // explicitly.
  92 
  93   // First make sure that, if either parameter is set, its value is
  94   // reasonable.
  95   if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
  96     if (MaxGCPauseMillis < 1) {
  97       vm_exit_during_initialization("MaxGCPauseMillis should be "
  98                                     "greater than 0");
  99     }
 100   }
 101   if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
 102     if (GCPauseIntervalMillis < 1) {


 123   // the pause time target (this will also deal with the case when the
 124   // pause time target is the default value).
 125   if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
 126     FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
 127   }
 128 
 129   // Finally, make sure that the two parameters are consistent.
 130   if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
 131     char buffer[256];
 132     jio_snprintf(buffer, 256,
 133                  "MaxGCPauseMillis (%u) should be less than "
 134                  "GCPauseIntervalMillis (%u)",
 135                  MaxGCPauseMillis, GCPauseIntervalMillis);
 136     vm_exit_during_initialization(buffer);
 137   }
 138 
 139   double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
 140   double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
 141   _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
 142 



 143   _tenuring_threshold = MaxTenuringThreshold;
 144 
 145   assert(GCTimeRatio > 0,
 146          "we should have set it to a default value set_g1_gc_flags() "
 147          "if a user set it to 0");
 148   _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
 149 
 150   uintx reserve_perc = G1ReservePercent;
 151   // Put an artificial ceiling on this so that it's not set to a silly value.
 152   if (reserve_perc > 50) {
 153     reserve_perc = 50;
 154     warning("G1ReservePercent is set to a value that is too large, "
 155             "it's been updated to " UINTX_FORMAT, reserve_perc);
 156   }
 157   _reserve_factor = (double) reserve_perc / 100.0;
 158   // This will be set when the heap is expanded
 159   // for the first time during initialization.
 160   _reserve_regions = 0;
 161 
 162   _ihop_control = create_ihop_control();
 163 }
 164 
 165 G1CollectorPolicy::~G1CollectorPolicy() {
 166   delete _ihop_control;
 167 }
 168 








 169 void G1CollectorPolicy::initialize_alignments() {
 170   _space_alignment = HeapRegion::GrainBytes;
 171   size_t card_table_alignment = CardTableRS::ct_max_alignment_constraint();
 172   size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 173   _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size);
 174 }
 175 
 176 G1CollectorState* G1CollectorPolicy::collector_state() const { return _g1->collector_state(); }
 177 
 178 void G1CollectorPolicy::post_heap_initialize() {
 179   uintx max_regions = G1CollectedHeap::heap()->max_regions();
 180   size_t max_young_size = (size_t)_young_gen_sizer->max_young_length(max_regions) * HeapRegion::GrainBytes;
 181   if (max_young_size != MaxNewSize) {
 182     FLAG_SET_ERGO(size_t, MaxNewSize, max_young_size);
 183   }
 184 }
 185 
 186 void G1CollectorPolicy::initialize_flags() {
 187   if (G1HeapRegionSize != HeapRegion::GrainBytes) {
 188     FLAG_SET_ERGO(size_t, G1HeapRegionSize, HeapRegion::GrainBytes);


 223   phase_times()->note_gc_start(num_active_workers);
 224 }
 225 
 226 // Create the jstat counters for the policy.
 227 void G1CollectorPolicy::initialize_gc_policy_counters() {
 228   _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
 229 }
 230 
 231 bool G1CollectorPolicy::predict_will_fit(uint young_length,
 232                                          double base_time_ms,
 233                                          uint base_free_regions,
 234                                          double target_pause_time_ms) const {
 235   if (young_length >= base_free_regions) {
 236     // end condition 1: not enough space for the young regions
 237     return false;
 238   }
 239 
 240   double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
 241   size_t bytes_to_copy =
 242                (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
 243   double copy_time_ms = _analytics->predict_object_copy_time_ms(bytes_to_copy,
 244                                                                    collector_state()->during_concurrent_mark());
 245   double young_other_time_ms = _analytics->predict_young_other_time_ms(young_length);
 246   double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
 247   if (pause_time_ms > target_pause_time_ms) {
 248     // end condition 2: prediction is over the target pause time
 249     return false;
 250   }
 251 
 252   size_t free_bytes = (base_free_regions - young_length) * HeapRegion::GrainBytes;
 253 
 254   // When copying, we will likely need more bytes free than is live in the region.
 255   // Add some safety margin to factor in the confidence of our guess, and the
 256   // natural expected waste.
 257   // (100.0 / G1ConfidencePercent) is a scale factor that expresses the uncertainty
 258   // of the calculation: the lower the confidence, the more headroom.
 259   // (100 + TargetPLABWastePct) represents the increase in expected bytes during
 260   // copying due to anticipated waste in the PLABs.
 261   double safety_factor = (100.0 / G1ConfidencePercent) * (100 + TargetPLABWastePct) / 100.0;
 262   size_t expected_bytes_to_copy = (size_t)(safety_factor * bytes_to_copy);
 263 
 264   if (expected_bytes_to_copy > free_bytes) {
 265     // end condition 3: out-of-space


 269   // success!
 270   return true;
 271 }
 272 
 273 void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) {
 274   // re-calculate the necessary reserve
 275   double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
 276   // We use ceiling so that if reserve_regions_d is > 0.0 (but
 277   // smaller than 1.0) we'll get 1.
 278   _reserve_regions = (uint) ceil(reserve_regions_d);
 279 
 280   _young_gen_sizer->heap_size_changed(new_number_of_regions);
 281 
 282   _ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes);
 283 }
 284 
 285 uint G1CollectorPolicy::calculate_young_list_desired_min_length(
 286                                                        uint base_min_length) const {
 287   uint desired_min_length = 0;
 288   if (adaptive_young_list_length()) {
 289     if (_analytics->num_alloc_rate_ms() > 3) {
 290       double now_sec = os::elapsedTime();
 291       double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
 292       double alloc_rate_ms = _analytics->predict_alloc_rate_ms();
 293       desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
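           // Illustration only (hypothetical numbers): a predicted allocation rate
           // of 0.05 regions/ms and 200 ms until the next full-length pause is
           // allowed gives ceil(0.05 * 200) = 10 regions as the desired minimum.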
 294     } else {
 295       // otherwise we don't have enough info to make the prediction
 296     }
 297   }
 298   desired_min_length += base_min_length;
 299   // make sure we don't go below any user-defined minimum bound
 300   return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
 301 }
 302 
 303 uint G1CollectorPolicy::calculate_young_list_desired_max_length() const {
 304   // Here, we might want to also take into account any additional
 305   // constraints (i.e., user-defined minimum bound). Currently, we
 306   // effectively don't set this bound.
 307   return _young_gen_sizer->max_desired_young_length();
 308 }
 309 
 310 uint G1CollectorPolicy::update_young_list_max_and_target_length() {
 311   return update_young_list_max_and_target_length(_analytics->predict_rs_lengths());
 312 }
 313 
 314 uint G1CollectorPolicy::update_young_list_max_and_target_length(size_t rs_lengths) {
 315   uint unbounded_target_length = update_young_list_target_length(rs_lengths);
 316   update_max_gc_locker_expansion();
 317   return unbounded_target_length;
 318 }
 319 
 320 uint G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
 321   YoungTargetLengths young_lengths = young_list_target_lengths(rs_lengths);
 322   _young_list_target_length = young_lengths.first;
 323   return young_lengths.second;
 324 }
 325 
 326 G1CollectorPolicy::YoungTargetLengths G1CollectorPolicy::young_list_target_lengths(size_t rs_lengths) const {
 327   YoungTargetLengths result;
 328 
 329   // Calculate the absolute and desired min bounds first.
 330 
 331   // This is how many young regions we already have (currently: the survivors).


 396   assert(adaptive_young_list_length(), "pre-condition");
 397   assert(collector_state()->gcs_are_young(), "only call this for young GCs");
 398 
 399   // In case some edge-condition makes the desired max length too small...
 400   if (desired_max_length <= desired_min_length) {
 401     return desired_min_length;
 402   }
 403 
 404   // We'll adjust min_young_length and max_young_length not to include
 405   // the already allocated young regions (i.e., so they reflect the
 406   // min and max eden regions we'll allocate). The base_min_length
 407   // will be reflected in the predictions by the
 408   // survivor_regions_evac_time prediction.
 409   assert(desired_min_length > base_min_length, "invariant");
 410   uint min_young_length = desired_min_length - base_min_length;
 411   assert(desired_max_length > base_min_length, "invariant");
 412   uint max_young_length = desired_max_length - base_min_length;
 413 
 414   double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
 415   double survivor_regions_evac_time = predict_survivor_regions_evac_time();
 416   size_t pending_cards = _analytics->predict_pending_cards();
 417   size_t adj_rs_lengths = rs_lengths + _analytics->predict_rs_length_diff();
 418   size_t scanned_cards = _analytics->predict_card_num(adj_rs_lengths, /* gcs_are_young */ true);
 419   double base_time_ms =
 420     predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
 421     survivor_regions_evac_time;
 422   uint available_free_regions = _free_regions_at_end_of_collection;
 423   uint base_free_regions = 0;
 424   if (available_free_regions > _reserve_regions) {
 425     base_free_regions = available_free_regions - _reserve_regions;
 426   }
 427 
 428   // Here, we will make sure that the shortest young length that
 429   // makes sense fits within the target pause time.
 430 
 431   if (predict_will_fit(min_young_length, base_time_ms,
 432                        base_free_regions, target_pause_time_ms)) {
 433     // The shortest young length will fit into the target pause time;
 434     // we'll now check whether the absolute maximum number of young
 435     // regions will fit in the target pause time. If not, we'll do
 436     // a binary search between min_young_length and max_young_length.
 437     if (predict_will_fit(max_young_length, base_time_ms,
 438                          base_free_regions, target_pause_time_ms)) {


 498        r != NULL && r != _g1->young_list()->last_survivor_region()->get_next_young_region();
 499        r = r->get_next_young_region()) {
 500     survivor_regions_evac_time += predict_region_elapsed_time_ms(r, collector_state()->gcs_are_young());
 501   }
 502   return survivor_regions_evac_time;
 503 }
 504 
 505 void G1CollectorPolicy::revise_young_list_target_length_if_necessary(size_t rs_lengths) {
 506   guarantee( adaptive_young_list_length(), "should not call this otherwise" );
 507 
 508   if (rs_lengths > _rs_lengths_prediction) {
 509     // add 10% to avoid having to recalculate often
 510     size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
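         // Integer math illustration (hypothetical value): an observed rs_lengths
         // of 2000 becomes 2000 * 1100 / 1000 = 2200, i.e. a 10% padded prediction.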
 511     update_rs_lengths_prediction(rs_lengths_prediction);
 512 
 513     update_young_list_max_and_target_length(rs_lengths_prediction);
 514   }
 515 }
 516 
 517 void G1CollectorPolicy::update_rs_lengths_prediction() {
 518   update_rs_lengths_prediction(_analytics->predict_rs_lengths());
 519 }
 520 
 521 void G1CollectorPolicy::update_rs_lengths_prediction(size_t prediction) {
 522   if (collector_state()->gcs_are_young() && adaptive_young_list_length()) {
 523     _rs_lengths_prediction = prediction;
 524   }
 525 }
 526 
 527 #ifndef PRODUCT
 528 bool G1CollectorPolicy::verify_young_ages() {
 529   HeapRegion* head = _g1->young_list()->first_region();
 530   return
 531     verify_young_ages(head, _short_lived_surv_rate_group);
 532   // also call verify_young_ages on any additional surv rate groups
 533 }
 534 
 535 bool
 536 G1CollectorPolicy::verify_young_ages(HeapRegion* head,
 537                                      SurvRateGroup *surv_rate_group) {
 538   guarantee( surv_rate_group != NULL, "pre-condition" );


 566     }
 567   }
 568 
 569   return ret;
 570 }
 571 #endif // PRODUCT
 572 
 573 void G1CollectorPolicy::record_full_collection_start() {
 574   _full_collection_start_sec = os::elapsedTime();
 575   // Release the future to-space so that it is available for compaction into.
 576   collector_state()->set_full_collection(true);
 577 }
 578 
 579 void G1CollectorPolicy::record_full_collection_end() {
 580   // Consider this like a collection pause for the purposes of allocation
 581   // since last pause.
 582   double end_sec = os::elapsedTime();
 583   double full_gc_time_sec = end_sec - _full_collection_start_sec;
 584   double full_gc_time_ms = full_gc_time_sec * 1000.0;
 585 
 586   _analytics->update_recent_gc_times(end_sec, full_gc_time_ms);
 587 
 588   collector_state()->set_full_collection(false);
 589 
 590   // "Nuke" the heuristics that control the young/mixed GC
 591   // transitions and make sure we start with young GCs after the Full GC.
 592   collector_state()->set_gcs_are_young(true);
 593   collector_state()->set_last_young_gc(false);
 594   collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
 595   collector_state()->set_during_initial_mark_pause(false);
 596   collector_state()->set_in_marking_window(false);
 597   collector_state()->set_in_marking_window_im(false);
 598 
 599   _short_lived_surv_rate_group->start_adding_regions();
 600   // also call this on any additional surv rate groups
 601 
 602   _free_regions_at_end_of_collection = _g1->num_free_regions();
 603   // Reset survivors SurvRateGroup.
 604   _survivor_surv_rate_group->reset();
 605   update_young_list_max_and_target_length();
 606   update_rs_lengths_prediction();


 634   _survivors_age_table.clear();
 635 
 636   assert( verify_young_ages(), "region age verification" );
 637 }
 638 
 639 void G1CollectorPolicy::record_concurrent_mark_init_end(double
 640                                                    mark_init_elapsed_time_ms) {
 641   collector_state()->set_during_marking(true);
 642   assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
 643   collector_state()->set_during_initial_mark_pause(false);
 644 }
 645 
 646 void G1CollectorPolicy::record_concurrent_mark_remark_start() {
 647   _mark_remark_start_sec = os::elapsedTime();
 648   collector_state()->set_during_marking(false);
 649 }
 650 
 651 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
 652   double end_time_sec = os::elapsedTime();
 653   double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
 654   _analytics->report_concurrent_mark_remark_times_ms(elapsed_time_ms);
 655   _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);
 656 
 657   record_pause(Remark, _mark_remark_start_sec, end_time_sec);
 658 }
 659 
 660 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
 661   _mark_cleanup_start_sec = os::elapsedTime();
 662 }
 663 
 664 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
 665   bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc",
 666                                                               "skip last young-only gc");
 667   collector_state()->set_last_young_gc(should_continue_with_reclaim);
 668   // We skip the marking phase.
 669   if (!should_continue_with_reclaim) {
 670     abort_time_to_mixed_tracking();
 671   }
 672   collector_state()->set_in_marking_window(false);
 673 }
 674 
 675 double G1CollectorPolicy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {


 734 
 735 void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) {
 736   double end_time_sec = os::elapsedTime();
 737 
 738   size_t cur_used_bytes = _g1->used();
 739   assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
 740   bool last_pause_included_initial_mark = false;
 741   bool update_stats = !_g1->evacuation_failed();
 742 
 743   NOT_PRODUCT(_short_lived_surv_rate_group->print());
 744 
 745   record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);
 746 
 747   last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
 748   if (last_pause_included_initial_mark) {
 749     record_concurrent_mark_init_end(0.0);
 750   } else {
 751     maybe_start_marking();
 752   }
 753 
 754   double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _analytics->prev_collection_pause_end_ms());
 755   if (app_time_ms < MIN_TIMER_GRANULARITY) {
 756     // This usually happens due to the timer not having the required
 757     // granularity. Some Linuxes are the usual culprits.
 758     // We'll just set it to something (arbitrarily) small.
 759     app_time_ms = 1.0;
 760   }
 761 
 762   if (update_stats) {
 763     // We maintain the invariant that all objects allocated by mutator
 764     // threads will be allocated out of eden regions. So, we can use
 765     // the eden region number allocated since the previous GC to
 766     // calculate the application's allocation rate. The only exception
 767     // to that is humongous objects, which are allocated separately. But
 768     // given that humongous object allocations do not really affect
 769     // either the pause's duration or when the next pause will take
 770     // place, we can safely ignore them here.
 771     uint regions_allocated = _collection_set->eden_region_length();
 772     double alloc_rate_ms = (double) regions_allocated / app_time_ms;
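         // E.g. (hypothetical numbers): 50 eden regions allocated over 1000 ms of
         // mutator time gives an allocation rate of 0.05 regions/ms.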
 773     _analytics->report_alloc_rate_ms(alloc_rate_ms);
 774 
 775     double interval_ms =
 776       (end_time_sec - _analytics->last_known_gc_end_time_sec()) * 1000.0;
 777     _analytics->update_recent_gc_times(end_time_sec, pause_time_ms);
 778     _analytics->compute_pause_time_ratio(interval_ms, pause_time_ms);



















 779   }
 780 
 781   bool new_in_marking_window = collector_state()->in_marking_window();
 782   bool new_in_marking_window_im = false;
 783   if (last_pause_included_initial_mark) {
 784     new_in_marking_window = true;
 785     new_in_marking_window_im = true;
 786   }
 787 
 788   if (collector_state()->last_young_gc()) {
 789     // This is supposed to be the "last young GC" before we start
 790     // doing mixed GCs. Here we decide whether to start mixed GCs or not.
 791     assert(!last_pause_included_initial_mark, "The last young GC is not allowed to be an initial mark GC");
 792 
 793     if (next_gc_should_be_mixed("start mixed GCs",
 794                                 "do not start mixed GCs")) {
 795       collector_state()->set_gcs_are_young(false);
 796     } else {
 797       // We aborted the mixed GC phase early.
 798       abort_time_to_mixed_tracking();


 804   if (!collector_state()->last_gc_was_young()) {
 805     // This is a mixed GC. Here we decide whether to continue doing
 806     // mixed GCs or not.
 807     if (!next_gc_should_be_mixed("continue mixed GCs",
 808                                  "do not continue mixed GCs")) {
 809       collector_state()->set_gcs_are_young(true);
 810 
 811       maybe_start_marking();
 812     }
 813   }
 814 
 815   _short_lived_surv_rate_group->start_adding_regions();
 816   // Do that for any other surv rate groups
 817 
 818   double scan_hcc_time_ms = ConcurrentG1Refine::hot_card_cache_enabled() ? average_time_ms(G1GCPhaseTimes::ScanHCC) : 0.0;
 819 
 820   if (update_stats) {
 821     double cost_per_card_ms = 0.0;
 822     if (_pending_cards > 0) {
 823       cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms) / (double) _pending_cards;
 824       _analytics->report_cost_per_card_ms(cost_per_card_ms);
 825     }
 826     _analytics->report_cost_scan_hcc(scan_hcc_time_ms);
 827 
 828     double cost_per_entry_ms = 0.0;
 829     if (cards_scanned > 10) {
 830       cost_per_entry_ms = average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned;
 831       _analytics->report_cost_per_entry_ms(cost_per_entry_ms, collector_state()->last_gc_was_young());




 832     }
 833 
 834     if (_max_rs_lengths > 0) {
 835       double cards_per_entry_ratio =
 836         (double) cards_scanned / (double) _max_rs_lengths;
 837       _analytics->report_cards_per_entry_ratio(cards_per_entry_ratio, collector_state()->last_gc_was_young());




 838     }
 839 
 840     // This is defensive. For a while _max_rs_lengths could get
 841     // smaller than _recorded_rs_lengths which was causing
 842     // rs_length_diff to get very large and mess up the RSet length
 843     // predictions. The reason was unsafe concurrent updates to the
 844     // _inc_cset_recorded_rs_lengths field which the code below guards
 845     // against (see CR 7118202). This bug has now been fixed (see CR
 846     // 7119027). However, I'm still worried that
 847     // _inc_cset_recorded_rs_lengths might still end up somewhat
 848     // inaccurate. The concurrent refinement thread calculates an
 849     // RSet's length concurrently with other CR threads updating it
 850     // which might cause it to calculate the length incorrectly (if,
 851     // say, it's in mid-coarsening). So I'll leave in the defensive
 852     // conditional below just in case.
 853     size_t rs_length_diff = 0;
 854     size_t recorded_rs_lengths = _collection_set->recorded_rs_lengths();
 855     if (_max_rs_lengths > recorded_rs_lengths) {
 856       rs_length_diff = _max_rs_lengths - recorded_rs_lengths;
 857     }
 858     _analytics->report_rs_length_diff((double) rs_length_diff);
 859 
 860     size_t freed_bytes = heap_used_bytes_before_gc - cur_used_bytes;
 861     size_t copied_bytes = _collection_set->bytes_used_before() - freed_bytes;
 862     double cost_per_byte_ms = 0.0;
 863 
 864     if (copied_bytes > 0) {
 865       cost_per_byte_ms = average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
 866       _analytics->report_cost_per_byte_ms(cost_per_byte_ms, collector_state()->in_marking_window());




 867     }
 868 
 869     if (_collection_set->young_region_length() > 0) {
 870       _analytics->report_young_other_cost_per_region_ms(young_other_time_ms() /
 871                                                            _collection_set->young_region_length());
 872     }
 873 
 874     if (_collection_set->old_region_length() > 0) {
 875       _analytics->report_non_young_other_cost_per_region_ms(non_young_other_time_ms() /
 876                                                                _collection_set->old_region_length());
 877     }
 878 
 879     _analytics->report_constant_other_time_ms(constant_other_time_ms(pause_time_ms));
 880 
 881     _analytics->report_pending_cards((double) _pending_cards);
 882     _analytics->report_rs_lengths((double) _max_rs_lengths);
 883   }
 884 
 885   collector_state()->set_in_marking_window(new_in_marking_window);
 886   collector_state()->set_in_marking_window_im(new_in_marking_window_im);
 887   _free_regions_at_end_of_collection = _g1->num_free_regions();
 888   // IHOP control wants to know the expected young gen length if it were not
 889   // restrained by the heap reserve. Using the actual length would make the
 890   // prediction too small and thus limit the young gen every time we get to the
 891   // predicted target occupancy.
 892   size_t last_unrestrained_young_length = update_young_list_max_and_target_length();
 893   update_rs_lengths_prediction();
 894 
 895   update_ihop_prediction(app_time_ms / 1000.0,
 896                          _bytes_allocated_in_old_since_last_gc,
 897                          last_unrestrained_young_length * HeapRegion::GrainBytes);
 898   _bytes_allocated_in_old_since_last_gc = 0;
 899 
 900   _ihop_control->send_trace_event(_g1->gc_tracer_stw());
 901 
 902   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.


 999     cg1r->set_red_zone(g * k_gr);
1000     cg1r->reinitialize_threads();
1001 
1002     size_t processing_threshold_delta = MAX2<size_t>(cg1r->green_zone() * _predictor.sigma(), 1);
1003     size_t processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
1004                                     cg1r->yellow_zone());
1005     // Change the barrier params
1006     dcqs.set_process_completed_threshold((int)processing_threshold);
1007     dcqs.set_max_completed_queue((int)cg1r->red_zone());
1008   }
1009 
1010   size_t curr_queue_size = dcqs.completed_buffers_num();
1011   if (curr_queue_size >= cg1r->yellow_zone()) {
1012     dcqs.set_completed_queue_padding(curr_queue_size);
1013   } else {
1014     dcqs.set_completed_queue_padding(0);
1015   }
1016   dcqs.notify_if_necessary();
1017 }
1018 
































































































1019 double G1CollectorPolicy::predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const {
1020   TruncatedSeq* seq = surv_rate_group->get_seq(age);
1021   guarantee(seq->num() > 0, "There should be some young gen survivor samples available. Tried to access with age %d", age);
1022   double pred = _predictor.get_new_prediction(seq);
1023   if (pred > 1.0) {
1024     pred = 1.0;
1025   }
1026   return pred;
1027 }
1028 
1029 double G1CollectorPolicy::predict_yg_surv_rate(int age) const {
1030   return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
1031 }
1032 
1033 double G1CollectorPolicy::accum_yg_surv_rate_pred(int age) const {
1034   return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
1035 }
1036 
1037 double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
1038                                                        size_t scanned_cards) const {
1039   return
1040     _analytics->predict_rs_update_time_ms(pending_cards) +
1041     _analytics->predict_rs_scan_time_ms(scanned_cards, collector_state()->gcs_are_young()) +
1042     _analytics->predict_constant_other_time_ms();
1043 }
1044 
1045 double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) const {
1046   size_t rs_length = _analytics->predict_rs_lengths() + _analytics->predict_rs_length_diff();
1047   size_t card_num = _analytics->predict_card_num(rs_length, collector_state()->gcs_are_young());





1048   return predict_base_elapsed_time_ms(pending_cards, card_num);
1049 }
1050 
1051 size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) const {
1052   size_t bytes_to_copy;
1053   if (hr->is_marked()) {
1054     bytes_to_copy = hr->max_live_bytes();
1055   } else {
1056     assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
1057     int age = hr->age_in_surv_rate_group();
1058     double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
1059     bytes_to_copy = (size_t) (hr->used() * yg_surv_rate);
1060   }
1061   return bytes_to_copy;
1062 }
1063 
1064 double G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
1065                                                          bool for_young_gc) const {
1066   size_t rs_length = hr->rem_set()->occupied();


1067   // The predicted number of cards depends on which type of GC
1068   // we're predicting for.
1069   size_t card_num = _analytics->predict_card_num(rs_length, for_young_gc);




1070   size_t bytes_to_copy = predict_bytes_to_copy(hr);
1071 
1072   double region_elapsed_time_ms =
1073     _analytics->predict_rs_scan_time_ms(card_num, collector_state()->gcs_are_young()) +
1074     _analytics->predict_object_copy_time_ms(bytes_to_copy, collector_state()->during_concurrent_mark());
1075 
1076   // The prediction of the "other" time for this region is based
1077   // upon the region type and NOT the GC type.
1078   if (hr->is_young()) {
1079     region_elapsed_time_ms += _analytics->predict_young_other_time_ms(1);
1080   } else {
1081     region_elapsed_time_ms += _analytics->predict_non_young_other_time_ms(1);
1082   }
1083   return region_elapsed_time_ms;
1084 }
1085 







1086 void G1CollectorPolicy::clear_ratio_check_data() {
1087   _ratio_over_threshold_count = 0;
1088   _ratio_over_threshold_sum = 0.0;
1089   _pauses_since_start = 0;
1090 }
1091 
1092 size_t G1CollectorPolicy::expansion_amount() {
1093   double recent_gc_overhead = _analytics->recent_avg_pause_time_ratio() * 100.0;
1094   double last_gc_overhead = _analytics->last_pause_time_ratio() * 100.0;
1095   double threshold = _gc_overhead_perc;
1096   size_t expand_bytes = 0;
1097 
1098   // If the heap is at less than half its maximum size, scale the threshold down,
1099   // to a limit of 1. Thus the smaller the heap is, the more likely it is to expand,
1100   // though the scaling code will likely keep the increase small.
1101   if (_g1->capacity() <= _g1->max_capacity() / 2) {
1102     threshold *= (double)_g1->capacity() / (double)(_g1->max_capacity() / 2);
1103     threshold = MAX2(threshold, 1.0);
1104   }
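       // Illustration with hypothetical numbers: a base threshold of 10 and a heap
       // at one quarter of its maximum size scales the threshold by 0.25 / 0.5 = 0.5,
       // giving 5 (clamped to at least 1.0 by the code above).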
1105 
1106   // If the last GC time ratio is over the threshold, increment the count of
1107   // times it has been exceeded, and add this ratio to the sum of exceeded
1108   // ratios.
1109   if (last_gc_overhead > threshold) {
1110     _ratio_over_threshold_count++;
1111     _ratio_over_threshold_sum += last_gc_overhead;
1112   }
1113 
1114   // Check if we've had enough GC time ratio checks that were over the


1359   const uint min_chunk_size = MAX2(n_regions / n_workers, 1U);
1360   return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size);
1361 }
1362 
1363 void G1CollectorPolicy::record_concurrent_mark_cleanup_end() {
1364   cset_chooser()->clear();
1365 
1366   WorkGang* workers = _g1->workers();
1367   uint n_workers = workers->active_workers();
1368 
1369   uint n_regions = _g1->num_regions();
1370   uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions);
1371   cset_chooser()->prepare_for_par_region_addition(n_workers, n_regions, chunk_size);
1372   ParKnownGarbageTask par_known_garbage_task(cset_chooser(), chunk_size, n_workers);
1373   workers->run_task(&par_known_garbage_task);
1374 
1375   cset_chooser()->sort_regions();
1376 
1377   double end_sec = os::elapsedTime();
1378   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
1379   _analytics->report_concurrent_mark_cleanup_times_ms(elapsed_time_ms);
1380   _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);
1381 
1382   record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
1383 }
1384 
1385 double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) const {
1386   // Returns the given amount of reclaimable bytes (that represents
1387   // the amount of reclaimable space still to be collected) as a
1388   // percentage of the current heap capacity.
1389   size_t capacity_bytes = _g1->capacity();
1390   return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
1391 }
1392 
1393 void G1CollectorPolicy::maybe_start_marking() {
1394   if (need_to_start_conc_mark("end of GC")) {
1395     // Note: this might have already been set, if during the last
1396     // pause we decided to start a cycle but at the beginning of
1397     // this pause we decided to postpone it. That's OK.
1398     collector_state()->set_initiate_conc_mark_if_possible(true);
1399   }
1400 }


1498   // The max old CSet region bound is based on the threshold expressed
1499   // as a percentage of the heap size. I.e., it should bound the
1500   // number of old regions added to the CSet irrespective of how many
1501   // of them are available.
1502 
1503   const G1CollectedHeap* g1h = G1CollectedHeap::heap();
1504   const size_t region_num = g1h->num_regions();
1505   const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
1506   size_t result = region_num * perc / 100;
1507   // emulate ceiling
1508   if (100 * result < region_num * perc) {
1509     result += 1;
1510   }
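       // E.g. (hypothetical): 1000 regions at a 10% threshold gives exactly 100 old
       // regions, while 1005 regions gives 100.5, which the check above rounds up to 101.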
1511   return (uint) result;
1512 }
1513 
1514 void G1CollectorPolicy::finalize_collection_set(double target_pause_time_ms) {
1515   double time_remaining_ms = _collection_set->finalize_young_part(target_pause_time_ms);
1516   _collection_set->finalize_old_part(time_remaining_ms);
1517 }
