/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/concurrentG1Refine.hpp"
#include "gc/g1/concurrentMark.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1ErgoVerbose.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1Log.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "runtime/arguments.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/debug.hpp"

// Different defaults for different numbers of GC threads.
// They were chosen by running GCOld and SPECjbb on debris with different
// numbers of GC threads and picking the values based on the results.

// all the same
static double rs_length_diff_defaults[] = {
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

static double cost_per_card_ms_defaults[] = {
  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};

// all the same
static double young_cards_per_entry_ratio_defaults[] = {
  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};

static double cost_per_entry_ms_defaults[] = {
  0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};

static double cost_per_byte_ms_defaults[] = {
  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};

// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};

static double young_other_cost_per_region_ms_defaults[] = {
  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};

static double non_young_other_cost_per_region_ms_defaults[] = {
  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};
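
// Each table above has eight entries, one per number of parallel GC threads
// (1 through 8 or more); the constructor below indexes them with
// MIN2(ParallelGCThreads - 1, 7).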

G1CollectorPolicy::G1CollectorPolicy() :
  _predictor(G1ConfidencePercent / 100.0),
  _parallel_gc_threads(ParallelGCThreads),

  _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _stop_world_start(0.0),

  _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _prev_collection_pause_end_ms(0.0),
  _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_scan_hcc_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
  _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _non_young_other_cost_per_region_ms_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),

  _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),

  _pause_time_target_ms((double) MaxGCPauseMillis),

  _recent_prev_end_times_for_all_gcs_sec(
                                new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_avg_pause_time_ratio(0.0),
  _rs_lengths_prediction(0),
  _max_survivor_regions(0),

  _eden_used_bytes_before_gc(0),
  _survivor_used_bytes_before_gc(0),
  _heap_used_bytes_before_gc(0),
  _metaspace_used_bytes_before_gc(0),
  _eden_capacity_bytes_before_gc(0),
  _heap_capacity_bytes_before_gc(0),

  _eden_cset_region_length(0),
  _survivor_cset_region_length(0),
  _old_cset_region_length(0),

  _collection_set(NULL),
  _collection_set_bytes_used_before(0),

  // Incremental CSet attributes
  _inc_cset_build_state(Inactive),
  _inc_cset_head(NULL),
  _inc_cset_tail(NULL),
  _inc_cset_bytes_used_before(0),
  _inc_cset_max_finger(NULL),
  _inc_cset_recorded_rs_lengths(0),
  _inc_cset_recorded_rs_lengths_diffs(0),
  _inc_cset_predicted_elapsed_time_ms(0.0),
  _inc_cset_predicted_elapsed_time_ms_diffs(0.0),

  // add here any more surv rate groups
  _recorded_survivor_regions(0),
  _recorded_survivor_head(NULL),
  _recorded_survivor_tail(NULL),
  _survivors_age_table(true),

  _gc_overhead_perc(0.0) {

  // SurvRateGroups below must be initialized after the predictor because they
  // indirectly use it through this object passed to their constructor.
  _short_lived_surv_rate_group =
    new SurvRateGroup(&_predictor, "Short Lived", G1YoungSurvRateNumRegionsSummary);
  _survivor_surv_rate_group =
    new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary);

  // Set up the region size and associated fields. Given that the
  // policy is created before the heap, we have to set this up here,
  // so it's done as soon as possible.

  // It would have been natural to pass initial_heap_byte_size() and
  // max_heap_byte_size() to setup_heap_region_size() but those have
  // not been set up at this point since they should be aligned with
  // the region size. So, there is a circular dependency here. We base
  // the region size on the heap size, but the heap size should be
  // aligned with the region size. To get around this we use the
  // unaligned values for the heap.
  HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
  HeapRegionRemSet::setup_remset_size();

  G1ErgoVerbose::initialize();
  if (PrintAdaptiveSizePolicy) {
    // Currently, we only use a single switch for all the heuristics.
    G1ErgoVerbose::set_enabled(true);
    // Given that we don't currently have a verboseness level
    // parameter, we'll hardcode this to high. This can be easily
    // changed in the future.
    G1ErgoVerbose::set_level(ErgoHigh);
  } else {
    G1ErgoVerbose::set_enabled(false);
  }

  _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
  _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;

  _phase_times = new G1GCPhaseTimes(_parallel_gc_threads);

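  // Seed the prediction sequences with per-thread-count defaults; the tables
  // above have eight entries, so clamp the index to [0, 7].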
  int index = MIN2(_parallel_gc_threads - 1, 7);

  _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
  _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
  _cost_scan_hcc_seq->add(0.0);
  _young_cards_per_entry_ratio_seq->add(
                                  young_cards_per_entry_ratio_defaults[index]);
  _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
  _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
  _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
  _young_other_cost_per_region_ms_seq->add(
                               young_other_cost_per_region_ms_defaults[index]);
  _non_young_other_cost_per_region_ms_seq->add(
                           non_young_other_cost_per_region_ms_defaults[index]);

  // Below, we might need to calculate the pause interval based on
  // the pause time target. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
  // arrange for the pause interval to be the pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.

  // First make sure that, if either parameter is set, its value is
  // reasonable.
  if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (MaxGCPauseMillis < 1) {
      vm_exit_during_initialization("MaxGCPauseMillis should be "
                                    "greater than 0");
    }
  }
  if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    if (GCPauseIntervalMillis < 1) {
      vm_exit_during_initialization("GCPauseIntervalMillis should be "
                                    "greater than 0");
    }
  }

  // Then, if the pause time target parameter was not set, set it to
  // the default value.
  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
      // The default pause time target in G1 is 200ms
      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
    } else {
      // We do not allow the pause interval to be set without the
      // pause time target
      vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
                                    "without setting MaxGCPauseMillis");
    }
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }

  // Finally, make sure that the two parameters are consistent.
  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
    char buffer[256];
    jio_snprintf(buffer, 256,
                 "MaxGCPauseMillis (%u) should be less than "
                 "GCPauseIntervalMillis (%u)",
                 MaxGCPauseMillis, GCPauseIntervalMillis);
    vm_exit_during_initialization(buffer);
  }

  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
  double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
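  // The MMU tracker just created enforces the resulting soft real-time goal:
  // at most max_gc_time seconds of pause time within any sliding window of
  // time_slice seconds.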

  // start conservatively (around 50ms is about right)
  _concurrent_mark_remark_times_ms->add(0.05);
  _concurrent_mark_cleanup_times_ms->add(0.20);
  _tenuring_threshold = MaxTenuringThreshold;

  assert(GCTimeRatio > 0,
         "we should have set it to a default value in set_g1_gc_flags() "
         "if a user set it to 0");
  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
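  // For example, a GCTimeRatio of 9 gives 100 * (1 / (1 + 9)) = 10%, i.e., a
  // target of at most 10% of total time spent in GC.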

  uintx reserve_perc = G1ReservePercent;
  // Put an artificial ceiling on this so that it's not set to a silly value.
  if (reserve_perc > 50) {
    reserve_perc = 50;
    warning("G1ReservePercent is set to a value that is too large, "
            "it's been updated to " UINTX_FORMAT, reserve_perc);
  }
  _reserve_factor = (double) reserve_perc / 100.0;
  // This will be set when the heap is expanded
  // for the first time during initialization.
  _reserve_regions = 0;

  _collectionSetChooser = new CollectionSetChooser();
}

double G1CollectorPolicy::get_new_prediction(TruncatedSeq const* seq) const {
  return _predictor.get_new_prediction(seq);
}

void G1CollectorPolicy::initialize_alignments() {
  _space_alignment = HeapRegion::GrainBytes;
  size_t card_table_alignment = CardTableRS::ct_max_alignment_constraint();
  size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
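  // The heap must satisfy the strictest of the three alignment constraints.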
  _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size);
}

void G1CollectorPolicy::initialize_flags() {
  if (G1HeapRegionSize != HeapRegion::GrainBytes) {
    FLAG_SET_ERGO(size_t, G1HeapRegionSize, HeapRegion::GrainBytes);
  }

  if (SurvivorRatio < 1) {
    vm_exit_during_initialization("Invalid survivor ratio specified");
  }
  CollectorPolicy::initialize_flags();
  _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
}

void G1CollectorPolicy::post_heap_initialize() {
  uintx max_regions = G1CollectedHeap::heap()->max_regions();
  size_t max_young_size = (size_t)_young_gen_sizer->max_young_length(max_regions) * HeapRegion::GrainBytes;
  if (max_young_size != MaxNewSize) {
    FLAG_SET_ERGO(size_t, MaxNewSize, max_young_size);
  }
}

G1CollectorState* G1CollectorPolicy::collector_state() const { return _g1->collector_state(); }

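// Flag precedence, as implemented below: NewRatio is honored only when
// neither NewSize nor MaxNewSize is set on the command line; explicit
// NewSize/MaxNewSize values pin the corresponding bound, and adaptive
// sizing is disabled when the young gen size is fully fixed.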
G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true),
        _min_desired_young_length(0), _max_desired_young_length(0) {
  if (FLAG_IS_CMDLINE(NewRatio)) {
    if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
      warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
    } else {
      _sizer_kind = SizerNewRatio;
      _adaptive_size = false;
      return;
    }
  }

  if (NewSize > MaxNewSize) {
    if (FLAG_IS_CMDLINE(MaxNewSize)) {
      warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). "
              "A new max generation size of " SIZE_FORMAT "k will be used.",
              NewSize/K, MaxNewSize/K, NewSize/K);
    }
    MaxNewSize = NewSize;
  }

  if (FLAG_IS_CMDLINE(NewSize)) {
    _min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes),
                                     1U);
    if (FLAG_IS_CMDLINE(MaxNewSize)) {
      _max_desired_young_length =
                             MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
                                  1U);
      _sizer_kind = SizerMaxAndNewSize;
      // Adaptive sizing only makes sense when min and max differ; with
      // min == max the young gen size is fixed.
      _adaptive_size = _min_desired_young_length != _max_desired_young_length;
    } else {
      _sizer_kind = SizerNewSizeOnly;
    }
  } else if (FLAG_IS_CMDLINE(MaxNewSize)) {
    _max_desired_young_length =
                             MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
                                  1U);
    _sizer_kind = SizerMaxNewSizeOnly;
  }
}

uint G1YoungGenSizer::calculate_default_min_length(uint new_number_of_heap_regions) {
  uint default_value = (new_number_of_heap_regions * G1NewSizePercent) / 100;
  return MAX2(1U, default_value);
}

uint G1YoungGenSizer::calculate_default_max_length(uint new_number_of_heap_regions) {
  uint default_value = (new_number_of_heap_regions * G1MaxNewSizePercent) / 100;
  return MAX2(1U, default_value);
}

void G1YoungGenSizer::recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length) {
  assert(number_of_heap_regions > 0, "Heap must be initialized");

  switch (_sizer_kind) {
    case SizerDefaults:
      *min_young_length = calculate_default_min_length(number_of_heap_regions);
      *max_young_length = calculate_default_max_length(number_of_heap_regions);
      break;
    case SizerNewSizeOnly:
      *max_young_length = calculate_default_max_length(number_of_heap_regions);
      *max_young_length = MAX2(*min_young_length, *max_young_length);
      break;
    case SizerMaxNewSizeOnly:
      *min_young_length = calculate_default_min_length(number_of_heap_regions);
      *min_young_length = MIN2(*min_young_length, *max_young_length);
      break;
    case SizerMaxAndNewSize:
      // Do nothing. Values set on the command line, don't update them at runtime.
      break;
    case SizerNewRatio:
      *min_young_length = number_of_heap_regions / (NewRatio + 1);
      *max_young_length = *min_young_length;
      break;
    default:
      ShouldNotReachHere();
  }

  assert(*min_young_length <= *max_young_length, "Invalid min/max young gen size values");
}

uint G1YoungGenSizer::max_young_length(uint number_of_heap_regions) {
  // We need to pass the desired values because recalculation may not update these
  // values in some cases.
  uint temp = _min_desired_young_length;
  uint result = _max_desired_young_length;
  recalculate_min_max_young_length(number_of_heap_regions, &temp, &result);
  return result;
}

void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) {
  recalculate_min_max_young_length(new_number_of_heap_regions, &_min_desired_young_length,
          &_max_desired_young_length);
}

void G1CollectorPolicy::init() {
  // Set aside an initial future to_space.
  _g1 = G1CollectedHeap::heap();

  assert(Heap_lock->owned_by_self(), "Locking discipline.");

  initialize_gc_policy_counters();

  if (adaptive_young_list_length()) {
    _young_list_fixed_length = 0;
  } else {
    _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
  }
  _free_regions_at_end_of_collection = _g1->num_free_regions();

  update_young_list_max_and_target_length();
  // We may immediately start allocating regions and placing them on the
  // collection set list. Initialize the per-collection set info
  start_incremental_cset_building();
}

void G1CollectorPolicy::note_gc_start(uint num_active_workers) {
  phase_times()->note_gc_start(num_active_workers);
}

// Create the jstat counters for the policy.
void G1CollectorPolicy::initialize_gc_policy_counters() {
  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
}

bool G1CollectorPolicy::predict_will_fit(uint young_length,
                                         double base_time_ms,
                                         uint base_free_regions,
                                         double target_pause_time_ms) const {
  if (young_length >= base_free_regions) {
    // end condition 1: not enough space for the young regions
    return false;
  }

  double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
  size_t bytes_to_copy =
               (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
  double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
  double young_other_time_ms = predict_young_other_time_ms(young_length);
  double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
  if (pause_time_ms > target_pause_time_ms) {
    // end condition 2: prediction is over the target pause time
    return false;
  }

  size_t free_bytes = (base_free_regions - young_length) * HeapRegion::GrainBytes;
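  // Require head room proportional to prediction noise: the predicted copy
  // volume, scaled by twice the predictor's sigma, must fit in the regions
  // left after the young list is allocated.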
  if ((2.0 /* magic */ * _predictor.sigma()) * bytes_to_copy > free_bytes) {
    // end condition 3: out-of-space (conservatively!)
    return false;
  }

  // success!
  return true;
}

void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) {
  // re-calculate the necessary reserve
  double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
  // We use ceiling so that if reserve_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _reserve_regions = (uint) ceil(reserve_regions_d);

  _young_gen_sizer->heap_size_changed(new_number_of_regions);
}

uint G1CollectorPolicy::calculate_young_list_desired_min_length(
                                                       uint base_min_length) const {
  uint desired_min_length = 0;
  if (adaptive_young_list_length()) {
    if (_alloc_rate_ms_seq->num() > 3) {
      double now_sec = os::elapsedTime();
      double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
      double alloc_rate_ms = predict_alloc_rate_ms();
      desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
    } else {
      // otherwise we don't have enough info to make the prediction
    }
  }
  desired_min_length += base_min_length;
  // make sure we don't go below any user-defined minimum bound
  return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
}

uint G1CollectorPolicy::calculate_young_list_desired_max_length() const {
  // Here, we might want to also take into account any additional
  // constraints (i.e., user-defined minimum bound). Currently, we
  // effectively don't set this bound.
  return _young_gen_sizer->max_desired_young_length();
}

void G1CollectorPolicy::update_young_list_max_and_target_length() {
  update_young_list_max_and_target_length(get_new_prediction(_rs_lengths_seq));
}

void G1CollectorPolicy::update_young_list_max_and_target_length(size_t rs_lengths) {
  update_young_list_target_length(rs_lengths);
  update_max_gc_locker_expansion();
}

void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
  _young_list_target_length = bounded_young_list_target_length(rs_lengths);
}

void G1CollectorPolicy::update_young_list_target_length() {
  update_young_list_target_length(get_new_prediction(_rs_lengths_seq));
}

uint G1CollectorPolicy::bounded_young_list_target_length(size_t rs_lengths) const {
  // Calculate the absolute and desired min bounds.

  // This is how many young regions we already have (currently: the survivors).
  uint base_min_length = recorded_survivor_regions();
  uint desired_min_length = calculate_young_list_desired_min_length(base_min_length);
  // This is the absolute minimum young length. Ensure that we
  // will at least have one eden region available for allocation.
  uint absolute_min_length = base_min_length + MAX2(_g1->young_list()->eden_length(), (uint)1);
  // If we shrank the young list target it should not shrink below the current size.
  desired_min_length = MAX2(desired_min_length, absolute_min_length);
  // Calculate the absolute and desired max bounds.

  // We will try our best not to "eat" into the reserve.
  uint absolute_max_length = 0;
  if (_free_regions_at_end_of_collection > _reserve_regions) {
    absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
  }
  uint desired_max_length = calculate_young_list_desired_max_length();
  if (desired_max_length > absolute_max_length) {
    desired_max_length = absolute_max_length;
  }

  uint young_list_target_length = 0;
  if (adaptive_young_list_length()) {
    if (collector_state()->gcs_are_young()) {
      young_list_target_length =
                        calculate_young_list_target_length(rs_lengths,
                                                           base_min_length,
                                                           desired_min_length,
                                                           desired_max_length);
    } else {
      // Don't calculate anything and let the code below bound it to
      // the desired_min_length, i.e., do the next GC as soon as
      // possible to maximize how many old regions we can add to it.
    }
  } else {
    // The user asked for a fixed young gen so we'll fix the young gen
    // whether the next GC is young or mixed.
    young_list_target_length = _young_list_fixed_length;
  }

  // Make sure we don't go over the desired max length, nor under the
  // desired min length. In case they clash, desired_min_length wins
  // which is why that test is second.
  if (young_list_target_length > desired_max_length) {
    young_list_target_length = desired_max_length;
  }
  if (young_list_target_length < desired_min_length) {
    young_list_target_length = desired_min_length;
  }

  assert(young_list_target_length > recorded_survivor_regions(),
         "we should be able to allocate at least one eden region");
  assert(young_list_target_length >= absolute_min_length, "post-condition");

  return young_list_target_length;
}

uint
G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
                                                     uint base_min_length,
                                                     uint desired_min_length,
                                                     uint desired_max_length) const {
  assert(adaptive_young_list_length(), "pre-condition");
  assert(collector_state()->gcs_are_young(), "only call this for young GCs");

  // In case some edge-condition makes the desired max length too small...
  if (desired_max_length <= desired_min_length) {
    return desired_min_length;
  }

  // We'll adjust min_young_length and max_young_length not to include
  // the already allocated young regions (i.e., so they reflect the
  // min and max eden regions we'll allocate). The base_min_length
  // will be reflected in the predictions by the
  // survivor_regions_evac_time prediction.
  assert(desired_min_length > base_min_length, "invariant");
  uint min_young_length = desired_min_length - base_min_length;
  assert(desired_max_length > base_min_length, "invariant");
  uint max_young_length = desired_max_length - base_min_length;

  double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
  double survivor_regions_evac_time = predict_survivor_regions_evac_time();
  size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
  size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
  size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
  double base_time_ms =
    predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
    survivor_regions_evac_time;
  uint available_free_regions = _free_regions_at_end_of_collection;
  uint base_free_regions = 0;
  if (available_free_regions > _reserve_regions) {
    base_free_regions = available_free_regions - _reserve_regions;
  }

  // Here, we will make sure that the shortest young length that
  // makes sense fits within the target pause time.

  if (predict_will_fit(min_young_length, base_time_ms,
                       base_free_regions, target_pause_time_ms)) {
    // The shortest young length will fit into the target pause time;
    // we'll now check whether the absolute maximum number of young
    // regions will fit in the target pause time. If not, we'll do
    // a binary search between min_young_length and max_young_length.
    if (predict_will_fit(max_young_length, base_time_ms,
                         base_free_regions, target_pause_time_ms)) {
      // The maximum young length will fit into the target pause time.
      // We are done so set min young length to the maximum length (as
      // the result is assumed to be returned in min_young_length).
      min_young_length = max_young_length;
    } else {
      // The maximum possible number of young regions will not fit within
      // the target pause time so we'll search for the optimal
      // length. The loop invariants are:
      //
      // min_young_length < max_young_length
      // min_young_length is known to fit into the target pause time
      // max_young_length is known not to fit into the target pause time
      //
      // Going into the loop we know the above hold as we've just
      // checked them. Every time around the loop we check whether
      // the middle value between min_young_length and
      // max_young_length fits into the target pause time. If it
      // does, it becomes the new min. If it doesn't, it becomes
      // the new max. This way we maintain the loop invariants.

      assert(min_young_length < max_young_length, "invariant");
      uint diff = (max_young_length - min_young_length) / 2;
      while (diff > 0) {
        uint young_length = min_young_length + diff;
        if (predict_will_fit(young_length, base_time_ms,
                             base_free_regions, target_pause_time_ms)) {
          min_young_length = young_length;
        } else {
          max_young_length = young_length;
        }
        assert(min_young_length < max_young_length, "invariant");
        diff = (max_young_length - min_young_length) / 2;
      }
      // The result is min_young_length which, according to the
      // loop invariants, should fit within the target pause time.

      // These are the post-conditions of the binary search above:
      assert(min_young_length < max_young_length,
             "otherwise we should have discovered that max_young_length "
             "fits into the pause target and not done the binary search");
      assert(predict_will_fit(min_young_length, base_time_ms,
                              base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should "
             "fit into the pause target");
      assert(!predict_will_fit(min_young_length + 1, base_time_ms,
                               base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should be "
             "optimal, so no larger length should fit into the pause target");
    }
  } else {
    // Even the minimum length doesn't fit into the pause time
    // target, return it as the result nevertheless.
  }
  return base_min_length + min_young_length;
}

double G1CollectorPolicy::predict_survivor_regions_evac_time() const {
  double survivor_regions_evac_time = 0.0;
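  // Walk the recorded survivor list from head through tail inclusive; the
  // loop stops at the region that follows the tail.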
  for (HeapRegion * r = _recorded_survivor_head;
       r != NULL && r != _recorded_survivor_tail->get_next_young_region();
       r = r->get_next_young_region()) {
    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, collector_state()->gcs_are_young());
  }
  return survivor_regions_evac_time;
}

void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
  guarantee( adaptive_young_list_length(), "should not call this otherwise" );

  size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
  if (rs_lengths > _rs_lengths_prediction) {
    // add 10% to avoid having to recalculate often
    size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
    update_rs_lengths_prediction(rs_lengths_prediction);

    update_young_list_max_and_target_length(rs_lengths_prediction);
  }
}

void G1CollectorPolicy::update_rs_lengths_prediction() {
  update_rs_lengths_prediction(get_new_prediction(_rs_lengths_seq));
}

void G1CollectorPolicy::update_rs_lengths_prediction(size_t prediction) {
  if (collector_state()->gcs_are_young() && adaptive_young_list_length()) {
    _rs_lengths_prediction = prediction;
  }
}

HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
                                               bool is_tlab,
                                               bool* gc_overhead_limit_was_exceeded) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

// This method controls how a collector handles one or more
// of its generations being fully allocated.
HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
                                                       bool is_tlab) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

#ifndef PRODUCT
bool G1CollectorPolicy::verify_young_ages() {
  HeapRegion* head = _g1->young_list()->first_region();
  return
    verify_young_ages(head, _short_lived_surv_rate_group);
  // also call verify_young_ages on any additional surv rate groups
}

bool
G1CollectorPolicy::verify_young_ages(HeapRegion* head,
                                     SurvRateGroup *surv_rate_group) {
  guarantee( surv_rate_group != NULL, "pre-condition" );

  const char* name = surv_rate_group->name();
  bool ret = true;
  int prev_age = -1;

  for (HeapRegion* curr = head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    SurvRateGroup* group = curr->surv_rate_group();
    if (group == NULL && !curr->is_survivor()) {
      gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
      ret = false;
    }

    if (surv_rate_group == group) {
      int age = curr->age_in_surv_rate_group();

      if (age < 0) {
        gclog_or_tty->print_cr("## %s: encountered negative age", name);
        ret = false;
      }

      if (age <= prev_age) {
        gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
                               "(%d, %d)", name, age, prev_age);
        ret = false;
      }
      prev_age = age;
    }
  }

  return ret;
}
#endif // PRODUCT

void G1CollectorPolicy::record_full_collection_start() {
  _full_collection_start_sec = os::elapsedTime();
  record_heap_size_info_at_start(true /* full */);
  // Release the future to-space so that it is available for compaction into.
  collector_state()->set_full_collection(true);
}

void G1CollectorPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _full_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  _trace_old_gen_time_data.record_full_collection(full_gc_time_ms);

  update_recent_gc_times(end_sec, full_gc_time_ms);

  collector_state()->set_full_collection(false);

  // "Nuke" the heuristics that control the young/mixed GC
  // transitions and make sure we start with young GCs after the Full GC.
  collector_state()->set_gcs_are_young(true);
  collector_state()->set_last_young_gc(false);
  collector_state()->set_initiate_conc_mark_if_possible(false);
  collector_state()->set_during_initial_mark_pause(false);
  collector_state()->set_in_marking_window(false);
  collector_state()->set_in_marking_window_im(false);

  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  record_survivor_regions(0, NULL, NULL);

  _free_regions_at_end_of_collection = _g1->num_free_regions();
  // Reset survivors SurvRateGroup.
  _survivor_surv_rate_group->reset();
  update_young_list_max_and_target_length();
  update_rs_lengths_prediction();
  _collectionSetChooser->clear();
}

void G1CollectorPolicy::record_stop_world_start() {
  _stop_world_start = os::elapsedTime();
}

void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start, so there is no point in calculating
  // this every time we calculate / recalculate the target young length.
  update_survivors_policy();

  assert(_g1->used() == _g1->recalculate_used(),
         "sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT,
         _g1->used(), _g1->recalculate_used());

  double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
  _trace_young_gen_time_data.record_start_collection(s_w_t_ms);
  _stop_world_start = 0.0;

  record_heap_size_info_at_start(false /* full */);

  phase_times()->record_cur_collection_start_sec(start_time_sec);
  _pending_cards = _g1->pending_card_num();

  _collection_set_bytes_used_before = 0;
  _bytes_copied_during_gc = 0;

  collector_state()->set_last_gc_was_young(false);

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert( verify_young_ages(), "region age verification" );
}

void G1CollectorPolicy::record_concurrent_mark_init_end(double
                                                   mark_init_elapsed_time_ms) {
  collector_state()->set_during_marking(true);
  assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
  collector_state()->set_during_initial_mark_pause(false);
  _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
}

void G1CollectorPolicy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
  collector_state()->set_during_marking(false);
}

void G1CollectorPolicy::record_concurrent_mark_remark_end() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
  _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
  _cur_mark_stop_world_time_ms += elapsed_time_ms;
  _prev_collection_pause_end_ms += elapsed_time_ms;

  _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec);
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
  collector_state()->set_last_young_gc(true);
  collector_state()->set_in_marking_window(false);
}

void G1CollectorPolicy::record_concurrent_pause() {
  if (_stop_world_start > 0.0) {
    double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
    _trace_young_gen_time_data.record_yield_time(yield_ms);
  }
}

double G1CollectorPolicy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
  return phase_times()->average_time_ms(phase);
}

double G1CollectorPolicy::young_other_time_ms() const {
  return phase_times()->young_cset_choice_time_ms() +
         phase_times()->young_free_cset_time_ms();
}

double G1CollectorPolicy::non_young_other_time_ms() const {
  return phase_times()->non_young_cset_choice_time_ms() +
         phase_times()->non_young_free_cset_time_ms();
}

double G1CollectorPolicy::other_time_ms(double pause_time_ms) const {
  return pause_time_ms -
         average_time_ms(G1GCPhaseTimes::UpdateRS) -
         average_time_ms(G1GCPhaseTimes::ScanRS) -
         average_time_ms(G1GCPhaseTimes::ObjCopy) -
         average_time_ms(G1GCPhaseTimes::Termination);
}

double G1CollectorPolicy::constant_other_time_ms(double pause_time_ms) const {
  return other_time_ms(pause_time_ms) - young_other_time_ms() - non_young_other_time_ms();
}

bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
  if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
    return false;
  }

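  // IHOP check: request a concurrent cycle when the non-young occupancy plus
  // the pending allocation would cross InitiatingHeapOccupancyPercent of the
  // current heap capacity.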
  size_t marking_initiating_used_threshold =
    (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
  size_t cur_used_bytes = _g1->non_young_capacity_bytes();
  size_t alloc_byte_size = alloc_word_size * HeapWordSize;

  if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
    if (collector_state()->gcs_are_young() && !collector_state()->last_young_gc()) {
      ergo_verbose5(ErgoConcCycles,
        "request concurrent cycle initiation",
        ergo_format_reason("occupancy higher than threshold")
        ergo_format_byte("occupancy")
        ergo_format_byte("allocation request")
        ergo_format_byte_perc("threshold")
        ergo_format_str("source"),
        cur_used_bytes,
        alloc_byte_size,
        marking_initiating_used_threshold,
        (double) InitiatingHeapOccupancyPercent,
        source);
      return true;
    } else {
      ergo_verbose5(ErgoConcCycles,
        "do not request concurrent cycle initiation",
        ergo_format_reason("still doing mixed collections")
        ergo_format_byte("occupancy")
        ergo_format_byte("allocation request")
        ergo_format_byte_perc("threshold")
        ergo_format_str("source"),
        cur_used_bytes,
        alloc_byte_size,
        marking_initiating_used_threshold,
        (double) InitiatingHeapOccupancyPercent,
        source);
    }
  }

  return false;
}

// Anything below that is considered to be zero
#define MIN_TIMER_GRANULARITY 0.0000001

void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned) {
  double end_time_sec = os::elapsedTime();
  assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
         "otherwise, the subtraction below does not make sense");
  size_t rs_size =
            _cur_collection_pause_used_regions_at_start - cset_region_length();
  size_t cur_used_bytes = _g1->used();
  assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
  bool last_pause_included_initial_mark = false;
  bool update_stats = !_g1->evacuation_failed();

#ifndef PRODUCT
  if (G1YoungSurvRateVerbose) {
    gclog_or_tty->cr();
    _short_lived_surv_rate_group->print();
    // do that for any other surv rate groups too
  }
#endif // PRODUCT

  last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
  if (last_pause_included_initial_mark) {
    record_concurrent_mark_init_end(0.0);
  } else if (need_to_start_conc_mark("end of GC")) {
    // Note: this might have already been set, if during the last
    // pause we decided to start a cycle but at the beginning of
    // this pause we decided to postpone it. That's OK.
    collector_state()->set_initiate_conc_mark_if_possible(true);
  }

  _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0, end_time_sec);

  if (update_stats) {
    _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
    // this is where we update the allocation rate of the application
    double app_time_ms =
      (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
    if (app_time_ms < MIN_TIMER_GRANULARITY) {
      // This usually happens due to the timer not having the required
      // granularity. Some Linuxes are the usual culprits.
      // We'll just set it to something (arbitrarily) small.
      app_time_ms = 1.0;
    }
    // We maintain the invariant that all objects allocated by mutator
    // threads will be allocated out of eden regions. So, we can use
    // the eden region number allocated since the previous GC to
    // calculate the application's allocation rate. The only exception
    // to that is humongous objects that are allocated separately. But
    // given that humongous object allocations do not really affect
    // either the pause's duration or when the next pause will take
    // place, we can safely ignore them here.
    uint regions_allocated = eden_cset_region_length();
    double alloc_rate_ms = (double) regions_allocated / app_time_ms;
    _alloc_rate_ms_seq->add(alloc_rate_ms);

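    // Track the fraction of recent wall-clock time spent in GC: the sum of
    // the last NumPrevPausesForHeuristics pause times divided by the
    // interval back to the oldest recorded pause end.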
    double interval_ms =
      (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
    update_recent_gc_times(end_time_sec, pause_time_ms);
    _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
    if (recent_avg_pause_time_ratio() < 0.0 ||
        (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
      // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
      // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
      if (_recent_avg_pause_time_ratio < 0.0) {
        _recent_avg_pause_time_ratio = 0.0;
      } else {
        assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
        _recent_avg_pause_time_ratio = 1.0;
      }
    }
  }

  bool new_in_marking_window = collector_state()->in_marking_window();
  bool new_in_marking_window_im = false;
  if (last_pause_included_initial_mark) {
    new_in_marking_window = true;
    new_in_marking_window_im = true;
  }

  if (collector_state()->last_young_gc()) {
    // This is supposed to be the "last young GC" before we start
    // doing mixed GCs. Here we decide whether to start mixed GCs or not.

    if (!last_pause_included_initial_mark) {
      if (next_gc_should_be_mixed("start mixed GCs",
                                  "do not start mixed GCs")) {
        collector_state()->set_gcs_are_young(false);
      }
    } else {
      ergo_verbose0(ErgoMixedGCs,
                    "do not start mixed GCs",
                    ergo_format_reason("concurrent cycle is about to start"));
    }
    collector_state()->set_last_young_gc(false);
  }

  if (!collector_state()->last_gc_was_young()) {
    // This is a mixed GC. Here we decide whether to continue doing
    // mixed GCs or not.

    if (!next_gc_should_be_mixed("continue mixed GCs",
                                 "do not continue mixed GCs")) {
      collector_state()->set_gcs_are_young(true);
    }
  }

  _short_lived_surv_rate_group->start_adding_regions();
  // Do that for any other surv rate groups

  if (update_stats) {
    double cost_per_card_ms = 0.0;
    double cost_scan_hcc = average_time_ms(G1GCPhaseTimes::ScanHCC);
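    // Scan HCC time is reported as part of the Update RS phase; subtract it
    // so the per-card cost reflects only the card refinement work itself.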
    if (_pending_cards > 0) {
      cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - cost_scan_hcc) / (double) _pending_cards;
      _cost_per_card_ms_seq->add(cost_per_card_ms);
    }
    _cost_scan_hcc_seq->add(cost_scan_hcc);

    double cost_per_entry_ms = 0.0;
    if (cards_scanned > 10) {
      cost_per_entry_ms = average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned;
      if (collector_state()->last_gc_was_young()) {
        _cost_per_entry_ms_seq->add(cost_per_entry_ms);
      } else {
        _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
      }
    }

    if (_max_rs_lengths > 0) {
      double cards_per_entry_ratio =
        (double) cards_scanned / (double) _max_rs_lengths;
      if (collector_state()->last_gc_was_young()) {
        _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
      } else {
        _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
      }
    }

    // This is defensive. For a while _max_rs_lengths could get
    // smaller than _recorded_rs_lengths which was causing
    // rs_length_diff to get very large and mess up the RSet length
    // predictions. The reason was unsafe concurrent updates to the
    // _inc_cset_recorded_rs_lengths field which the code below guards
    // against (see CR 7118202). This bug has now been fixed (see CR
    // 7119027). However, I'm still worried that
    // _inc_cset_recorded_rs_lengths might still end up somewhat
    // inaccurate. The concurrent refinement thread calculates an
    // RSet's length concurrently with other CR threads updating it
    // which might cause it to calculate the length incorrectly (if,
    // say, it's in mid-coarsening). So I'll leave in the defensive
    // conditional below just in case.
    size_t rs_length_diff = 0;
    if (_max_rs_lengths > _recorded_rs_lengths) {
      rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
    }
    _rs_length_diff_seq->add((double) rs_length_diff);

    size_t freed_bytes = _heap_used_bytes_before_gc - cur_used_bytes;
    size_t copied_bytes = _collection_set_bytes_used_before - freed_bytes;
    double cost_per_byte_ms = 0.0;

    if (copied_bytes > 0) {
      cost_per_byte_ms = average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
      if (collector_state()->in_marking_window()) {
        _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
      } else {
        _cost_per_byte_ms_seq->add(cost_per_byte_ms);
      }
    }

    if (young_cset_region_length() > 0) {
      _young_other_cost_per_region_ms_seq->add(young_other_time_ms() /
                                               young_cset_region_length());
    }

    if (old_cset_region_length() > 0) {
      _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms() /
                                                   old_cset_region_length());
    }

    _constant_other_time_ms_seq->add(constant_other_time_ms(pause_time_ms));

    _pending_cards_seq->add((double) _pending_cards);
    _rs_lengths_seq->add((double) _max_rs_lengths);
  }

  collector_state()->set_in_marking_window(new_in_marking_window);
  collector_state()->set_in_marking_window_im(new_in_marking_window_im);
  _free_regions_at_end_of_collection = _g1->num_free_regions();
  update_young_list_max_and_target_length();
  update_rs_lengths_prediction();

  // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
  double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;

  double scan_hcc_time_ms = average_time_ms(G1GCPhaseTimes::ScanHCC);

  if (update_rs_time_goal_ms < scan_hcc_time_ms) {
    ergo_verbose2(ErgoTiming,
                  "adjust concurrent refinement thresholds",
                  ergo_format_reason("Scanning the HCC expected to take longer than Update RS time goal")
                  ergo_format_ms("Update RS time goal")
                  ergo_format_ms("Scan HCC time"),
                  update_rs_time_goal_ms,
                  scan_hcc_time_ms);

    update_rs_time_goal_ms = 0;
  } else {
    update_rs_time_goal_ms -= scan_hcc_time_ms;
  }
  adjust_concurrent_refinement(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
                               phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
                               update_rs_time_goal_ms);

  _collectionSetChooser->verify();
}

#define EXT_SIZE_FORMAT "%.1f%s"
#define EXT_SIZE_PARAMS(bytes)                                  \
  byte_size_in_proper_unit((double)(bytes)),                    \
  proper_unit_for_byte_size((bytes))

void G1CollectorPolicy::record_heap_size_info_at_start(bool full) {
  YoungList* young_list = _g1->young_list();
  _eden_used_bytes_before_gc = young_list->eden_used_bytes();
  _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
  _heap_capacity_bytes_before_gc = _g1->capacity();
  _heap_used_bytes_before_gc = _g1->used();
  _cur_collection_pause_used_regions_at_start = _g1->num_used_regions();

  _eden_capacity_bytes_before_gc =
         (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;

  if (full) {
    _metaspace_used_bytes_before_gc = MetaspaceAux::used_bytes();
  }
}

void G1CollectorPolicy::print_heap_transition(size_t bytes_before) const {
  size_t bytes_after = _g1->used();
  size_t capacity = _g1->capacity();

  gclog_or_tty->print(" " SIZE_FORMAT "%s->" SIZE_FORMAT "%s(" SIZE_FORMAT "%s)",
      byte_size_in_proper_unit(bytes_before),
      proper_unit_for_byte_size(bytes_before),
      byte_size_in_proper_unit(bytes_after),
      proper_unit_for_byte_size(bytes_after),
      byte_size_in_proper_unit(capacity),
      proper_unit_for_byte_size(capacity));
}

void G1CollectorPolicy::print_heap_transition() const {
  print_heap_transition(_heap_used_bytes_before_gc);
}

void G1CollectorPolicy::print_detailed_heap_transition(bool full) const {
  YoungList* young_list = _g1->young_list();

  size_t eden_used_bytes_after_gc = young_list->eden_used_bytes();
  size_t survivor_used_bytes_after_gc = young_list->survivor_used_bytes();
  size_t heap_used_bytes_after_gc = _g1->used();

  size_t heap_capacity_bytes_after_gc = _g1->capacity();
  size_t eden_capacity_bytes_after_gc =
    (_young_list_target_length * HeapRegion::GrainBytes) - survivor_used_bytes_after_gc;

  gclog_or_tty->print(
    "   [Eden: " EXT_SIZE_FORMAT "(" EXT_SIZE_FORMAT ")->" EXT_SIZE_FORMAT "(" EXT_SIZE_FORMAT ") "
    "Survivors: " EXT_SIZE_FORMAT "->" EXT_SIZE_FORMAT " "
    "Heap: " EXT_SIZE_FORMAT "(" EXT_SIZE_FORMAT ")->"
    EXT_SIZE_FORMAT "(" EXT_SIZE_FORMAT ")]",
    EXT_SIZE_PARAMS(_eden_used_bytes_before_gc),
    EXT_SIZE_PARAMS(_eden_capacity_bytes_before_gc),
    EXT_SIZE_PARAMS(eden_used_bytes_after_gc),
    EXT_SIZE_PARAMS(eden_capacity_bytes_after_gc),
    EXT_SIZE_PARAMS(_survivor_used_bytes_before_gc),
    EXT_SIZE_PARAMS(survivor_used_bytes_after_gc),
    EXT_SIZE_PARAMS(_heap_used_bytes_before_gc),
    EXT_SIZE_PARAMS(_heap_capacity_bytes_before_gc),
    EXT_SIZE_PARAMS(heap_used_bytes_after_gc),
    EXT_SIZE_PARAMS(heap_capacity_bytes_after_gc));

  if (full) {
    MetaspaceAux::print_metaspace_change(_metaspace_used_bytes_before_gc);
  }

  gclog_or_tty->cr();
}

void G1CollectorPolicy::print_phases(double pause_time_sec) {
  phase_times()->print(pause_time_sec);
}

1272 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
1273                                                      double update_rs_processed_buffers,
1274                                                      double goal_ms) {
1275   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
1276   ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
1277 
1278   if (G1UseAdaptiveConcRefinement) {
1279     const int k_gy = 3, k_gr = 6;
1280     const double inc_k = 1.1, dec_k = 0.9;
1281 
1282     int g = cg1r->green_zone();
1283     if (update_rs_time > goal_ms) {
      g = (int)(g * dec_k);  // Can become 0; that's OK, as that would mean mutator-only processing.
    } else {
      if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
        g = (int)MAX2(g * inc_k, g + 1.0);
      }
    }
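    // Illustrative example: with green zone g == 20, the zones below become
    // green == 20, yellow == 60 and red == 120 completed buffers.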
    // Update the refinement threads' parameters.
    cg1r->set_green_zone(g);
    cg1r->set_yellow_zone(g * k_gy);
    cg1r->set_red_zone(g * k_gr);
    cg1r->reinitialize_threads();

    int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * _predictor.sigma()), 1);
    int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
                                    cg1r->yellow_zone());
    // Update the barrier parameters.
    dcqs.set_process_completed_threshold(processing_threshold);
    dcqs.set_max_completed_queue(cg1r->red_zone());
  }

  int curr_queue_size = dcqs.completed_buffers_num();
  if (curr_queue_size >= cg1r->yellow_zone()) {
    dcqs.set_completed_queue_padding(curr_queue_size);
  } else {
    dcqs.set_completed_queue_padding(0);
  }
  dcqs.notify_if_necessary();
}

size_t G1CollectorPolicy::predict_rs_length_diff() const {
  return (size_t) get_new_prediction(_rs_length_diff_seq);
}

double G1CollectorPolicy::predict_alloc_rate_ms() const {
  return get_new_prediction(_alloc_rate_ms_seq);
}

double G1CollectorPolicy::predict_cost_per_card_ms() const {
  return get_new_prediction(_cost_per_card_ms_seq);
}

double G1CollectorPolicy::predict_scan_hcc_ms() const {
  return get_new_prediction(_cost_scan_hcc_seq);
}

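// The RS update prediction is a simple linear model: per-card cost times the
// number of pending cards, plus the predicted hot card cache scan time.
// Illustrative example: 10000 pending cards at 0.005 ms/card plus a 2 ms HCC
// scan predicts 52 ms.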
double G1CollectorPolicy::predict_rs_update_time_ms(size_t pending_cards) const {
  return pending_cards * predict_cost_per_card_ms() + predict_scan_hcc_ms();
}

double G1CollectorPolicy::predict_young_cards_per_entry_ratio() const {
  return get_new_prediction(_young_cards_per_entry_ratio_seq);
}

double G1CollectorPolicy::predict_mixed_cards_per_entry_ratio() const {
  if (_mixed_cards_per_entry_ratio_seq->num() < 2) {
    return predict_young_cards_per_entry_ratio();
  } else {
    return get_new_prediction(_mixed_cards_per_entry_ratio_seq);
  }
}

size_t G1CollectorPolicy::predict_young_card_num(size_t rs_length) const {
  return (size_t) (rs_length * predict_young_cards_per_entry_ratio());
}

size_t G1CollectorPolicy::predict_non_young_card_num(size_t rs_length) const {
  return (size_t)(rs_length * predict_mixed_cards_per_entry_ratio());
}

double G1CollectorPolicy::predict_rs_scan_time_ms(size_t card_num) const {
  if (collector_state()->gcs_are_young()) {
    return card_num * get_new_prediction(_cost_per_entry_ms_seq);
  } else {
    return predict_mixed_rs_scan_time_ms(card_num);
  }
}

double G1CollectorPolicy::predict_mixed_rs_scan_time_ms(size_t card_num) const {
  if (_mixed_cost_per_entry_ms_seq->num() < 3) {
    return card_num * get_new_prediction(_cost_per_entry_ms_seq);
  } else {
    return card_num * get_new_prediction(_mixed_cost_per_entry_ms_seq);
  }
}

double G1CollectorPolicy::predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const {
  if (_cost_per_byte_ms_during_cm_seq->num() < 3) {
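    // Too few copy-cost samples taken while marking was running; fall back
    // to the normal prediction, padded by 10% on the assumption that copying
    // is somewhat more expensive during concurrent marking.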
    return (1.1 * bytes_to_copy) * get_new_prediction(_cost_per_byte_ms_seq);
  } else {
    return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_during_cm_seq);
  }
}

double G1CollectorPolicy::predict_object_copy_time_ms(size_t bytes_to_copy) const {
  if (collector_state()->during_concurrent_mark()) {
    return predict_object_copy_time_ms_during_cm(bytes_to_copy);
  } else {
    return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_seq);
  }
}

double G1CollectorPolicy::predict_constant_other_time_ms() const {
  return get_new_prediction(_constant_other_time_ms_seq);
}

double G1CollectorPolicy::predict_young_other_time_ms(size_t young_num) const {
  return young_num * get_new_prediction(_young_other_cost_per_region_ms_seq);
}

double G1CollectorPolicy::predict_non_young_other_time_ms(size_t non_young_num) const {
  return non_young_num * get_new_prediction(_non_young_other_cost_per_region_ms_seq);
}

double G1CollectorPolicy::predict_remark_time_ms() const {
  return get_new_prediction(_concurrent_mark_remark_times_ms);
}

double G1CollectorPolicy::predict_cleanup_time_ms() const {
  return get_new_prediction(_concurrent_mark_cleanup_times_ms);
}

double G1CollectorPolicy::predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const {
  TruncatedSeq* seq = surv_rate_group->get_seq(age);
  guarantee(seq->num() > 0, "There should be some young gen survivor samples available. Tried to access with age %d", age);
  double pred = get_new_prediction(seq);
  if (pred > 1.0) {
    pred = 1.0;
  }
  return pred;
}

double G1CollectorPolicy::predict_yg_surv_rate(int age) const {
  return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
}

double G1CollectorPolicy::accum_yg_surv_rate_pred(int age) const {
  return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
}

double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
                                                       size_t scanned_cards) const {
  return
    predict_rs_update_time_ms(pending_cards) +
    predict_rs_scan_time_ms(scanned_cards) +
    predict_constant_other_time_ms();
}

double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) const {
  size_t rs_length = predict_rs_length_diff();
  size_t card_num;
  if (collector_state()->gcs_are_young()) {
    card_num = predict_young_card_num(rs_length);
  } else {
    card_num = predict_non_young_card_num(rs_length);
  }
  return predict_base_elapsed_time_ms(pending_cards, card_num);
}

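// A marked region predicts its live data from marking information; for a
// young region we scale its used bytes by the predicted survival rate.
// Illustrative example: 8 MB used at a predicted 30% survival rate gives
// roughly 2.4 MB to copy.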
size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) const {
  size_t bytes_to_copy;
  if (hr->is_marked()) {
    bytes_to_copy = hr->max_live_bytes();
  } else {
    assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
    int age = hr->age_in_surv_rate_group();
    double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
    bytes_to_copy = (size_t) (hr->used() * yg_surv_rate);
  }
  return bytes_to_copy;
}

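// Per-region prediction: RS scan time plus object copy time plus a per-region
// "other" overhead. Illustrative example: 300 cards at 0.01 ms/entry (3 ms),
// 200 KB copied at 0.00003 ms/byte (~6 ms) and ~0.2 ms of young "other" time
// predict roughly 9 ms for the region.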
double G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
                                                         bool for_young_gc) const {
  size_t rs_length = hr->rem_set()->occupied();
  size_t card_num;

  // Predicting the number of cards is based on which type of GC
  // we're predicting for.
  if (for_young_gc) {
    card_num = predict_young_card_num(rs_length);
  } else {
    card_num = predict_non_young_card_num(rs_length);
  }
  size_t bytes_to_copy = predict_bytes_to_copy(hr);

  double region_elapsed_time_ms =
    predict_rs_scan_time_ms(card_num) +
    predict_object_copy_time_ms(bytes_to_copy);

  // The prediction of the "other" time for this region is based
  // upon the region type and NOT the GC type.
  if (hr->is_young()) {
    region_elapsed_time_ms += predict_young_other_time_ms(1);
  } else {
    region_elapsed_time_ms += predict_non_young_other_time_ms(1);
  }
  return region_elapsed_time_ms;
}

void G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length,
                                                 uint survivor_cset_region_length) {
  _eden_cset_region_length     = eden_cset_region_length;
  _survivor_cset_region_length = survivor_cset_region_length;
  _old_cset_region_length      = 0;
}

void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
  _recorded_rs_lengths = rs_lengths;
}

void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
                                               double elapsed_ms) {
  _recent_gc_times_ms->add(elapsed_ms);
  _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
  _prev_collection_pause_end_ms = end_time_sec * 1000.0;
}

size_t G1CollectorPolicy::expansion_amount() const {
  double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
  double threshold = _gc_overhead_perc;
  if (recent_gc_overhead > threshold) {
    // We will double the existing space, or take
    // G1ExpandByPercentOfAvailable % of the available expansion
    // space, whichever is smaller, bounded below by a minimum
    // expansion (unless that's all that's left.)
    const size_t min_expand_bytes = 1*M;
    size_t reserved_bytes = _g1->max_capacity();
    size_t committed_bytes = _g1->capacity();
    size_t uncommitted_bytes = reserved_bytes - committed_bytes;
    size_t expand_bytes;
    size_t expand_bytes_via_pct =
      uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
    expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
    expand_bytes = MAX2(expand_bytes, min_expand_bytes);
    expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
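
    // Illustrative example: with 1 GB committed out of 4 GB reserved and
    // G1ExpandByPercentOfAvailable == 20, the 3 GB of uncommitted space
    // yields a ~614 MB candidate that survives all three clamps above.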

    ergo_verbose5(ErgoHeapSizing,
                  "attempt heap expansion",
                  ergo_format_reason("recent GC overhead higher than "
                                     "threshold after GC")
                  ergo_format_perc("recent GC overhead")
                  ergo_format_perc("threshold")
                  ergo_format_byte("uncommitted")
                  ergo_format_byte_perc("calculated expansion amount"),
                  recent_gc_overhead, threshold,
                  uncommitted_bytes,
                  expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);

    return expand_bytes;
  } else {
    return 0;
  }
}

void G1CollectorPolicy::print_tracing_info() const {
  _trace_young_gen_time_data.print();
  _trace_old_gen_time_data.print();
}

void G1CollectorPolicy::print_yg_surv_rate_info() const {
#ifndef PRODUCT
  _short_lived_surv_rate_group->print_surv_rate_summary();
  // add this call for any other surv rate groups
#endif // PRODUCT
}

bool G1CollectorPolicy::is_young_list_full() const {
  uint young_list_length = _g1->young_list()->length();
  uint young_list_target_length = _young_list_target_length;
  return young_list_length >= young_list_target_length;
}

bool G1CollectorPolicy::can_expand_young_list() const {
  uint young_list_length = _g1->young_list()->length();
  uint young_list_max_length = _young_list_max_length;
  return young_list_length < young_list_max_length;
}

void G1CollectorPolicy::update_max_gc_locker_expansion() {
  uint expansion_region_num = 0;
  if (GCLockerEdenExpansionPercent > 0) {
    double perc = (double) GCLockerEdenExpansionPercent / 100.0;
    double expansion_region_num_d = perc * (double) _young_list_target_length;
    // We use ceiling so that if expansion_region_num_d is > 0.0 (but
    // less than 1.0) we'll get 1.
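    // Illustrative example: a 60-region young target with
    // GCLockerEdenExpansionPercent == 5 allows ceil(3.0) == 3 extra regions.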
    expansion_region_num = (uint) ceil(expansion_region_num_d);
  } else {
    assert(expansion_region_num == 0, "sanity");
  }
  _young_list_max_length = _young_list_target_length + expansion_region_num;
  assert(_young_list_target_length <= _young_list_max_length, "post-condition");
}

// Calculates survivor space parameters.
void G1CollectorPolicy::update_survivors_policy() {
  double max_survivor_regions_d =
                 (double) _young_list_target_length / (double) SurvivorRatio;
  // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
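  // Illustrative example: a 40-region young target with SurvivorRatio == 8
  // caps the survivor space at ceil(40 / 8) == 5 regions.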

  _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
        HeapRegion::GrainWords * _max_survivor_regions, counters());
}

bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
                                                     GCCause::Cause gc_cause) {
  bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  if (!during_cycle) {
    ergo_verbose1(ErgoConcCycles,
                  "request concurrent cycle initiation",
                  ergo_format_reason("requested by GC cause")
                  ergo_format_str("GC cause"),
                  GCCause::to_string(gc_cause));
    collector_state()->set_initiate_conc_mark_if_possible(true);
    return true;
  } else {
    ergo_verbose1(ErgoConcCycles,
                  "do not request concurrent cycle initiation",
                  ergo_format_reason("concurrent cycle already in progress")
                  ergo_format_str("GC cause"),
                  GCCause::to_string(gc_cause));
    return false;
  }
}

void
G1CollectorPolicy::decide_on_conc_mark_initiation() {
  // We are about to decide on whether this pause will be an
  // initial-mark pause.

  // First, collector_state()->during_initial_mark_pause() should not already be set. We
  // will set it here if we have to. However, it should be cleared by
  // the end of the pause (it's only set for the duration of an
  // initial-mark pause).
  assert(!collector_state()->during_initial_mark_pause(), "pre-condition");

  if (collector_state()->initiate_conc_mark_if_possible()) {
    // We noticed on a previous pause that the heap occupancy had
    // gone over the initiating threshold and we should start a
    // concurrent marking cycle. So we might initiate one.

    bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
    if (!during_cycle) {
      // The concurrent marking thread is not "during a cycle", i.e.,
      // it has completed the last one. So we can go ahead and
      // initiate a new cycle.

      collector_state()->set_during_initial_mark_pause(true);
      // We do not allow mixed GCs during marking.
      if (!collector_state()->gcs_are_young()) {
        collector_state()->set_gcs_are_young(true);
        ergo_verbose0(ErgoMixedGCs,
                      "end mixed GCs",
                      ergo_format_reason("concurrent cycle is about to start"));
      }

      // And we can now clear initiate_conc_mark_if_possible() as
      // we've already acted on it.
      collector_state()->set_initiate_conc_mark_if_possible(false);

      ergo_verbose0(ErgoConcCycles,
                    "initiate concurrent cycle",
                    ergo_format_reason("concurrent cycle initiation requested"));
    } else {
      // The concurrent marking thread is still finishing up the
      // previous cycle. If we started one right now, the two cycles
      // would overlap. In particular, the concurrent marking thread might
      // be in the process of clearing the next marking bitmap (which
      // we will use for the next cycle if we start one). Starting a
      // cycle now would be bad given that parts of the marking
      // information might get cleared by the marking thread. And we
      // cannot wait for the marking thread to finish the cycle as it
      // periodically yields while clearing the next marking bitmap
      // and, if it's in a yield point, it's waiting for us to
      // finish. So, at this point we will not start a cycle and we'll
      // let the concurrent marking thread complete the last one.
      ergo_verbose0(ErgoConcCycles,
                    "do not initiate concurrent cycle",
                    ergo_format_reason("concurrent cycle already in progress"));
    }
  }
}

class ParKnownGarbageHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  CSetChooserParUpdater _cset_updater;

public:
  ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
                           uint chunk_size) :
    _g1h(G1CollectedHeap::heap()),
    _cset_updater(hrSorted, true /* parallel */, chunk_size) { }

  bool doHeapRegion(HeapRegion* r) {
    // Do we have any marking information for this region?
    if (r->is_marked()) {
      // We will skip any region that's currently used as an old GC
      // alloc region (we should not consider those for collection
      // before we fill them up).
      if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
        _cset_updater.add_region(r);
      }
    }
    return false;
  }
};

class ParKnownGarbageTask: public AbstractGangTask {
  CollectionSetChooser* _hrSorted;
  uint _chunk_size;
  G1CollectedHeap* _g1;
  HeapRegionClaimer _hrclaimer;

public:
  ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size, uint n_workers) :
      AbstractGangTask("ParKnownGarbageTask"),
      _hrSorted(hrSorted), _chunk_size(chunk_size),
      _g1(G1CollectedHeap::heap()), _hrclaimer(n_workers) {}

  void work(uint worker_id) {
    ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);
    _g1->heap_region_par_iterate(&parKnownGarbageCl, worker_id, &_hrclaimer);
  }
};

uint G1CollectorPolicy::calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const {
  assert(n_workers > 0, "Active gc workers should be greater than 0");
  const uint overpartition_factor = 4;
  const uint min_chunk_size = MAX2(n_regions / n_workers, 1U);
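  // Illustrative example: 2048 regions and 8 workers give
  // MAX2(2048 / 32, 256) == 256 regions per chunk.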
  return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size);
}

void
G1CollectorPolicy::record_concurrent_mark_cleanup_end() {
  _collectionSetChooser->clear();

  WorkGang* workers = _g1->workers();
  uint n_workers = workers->active_workers();

  uint n_regions = _g1->num_regions();
  uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions);
  _collectionSetChooser->prepare_for_par_region_addition(n_workers, n_regions, chunk_size);
  ParKnownGarbageTask par_known_garbage_task(_collectionSetChooser, chunk_size, n_workers);
  workers->run_task(&par_known_garbage_task);

  _collectionSetChooser->sort_regions();

  double end_sec = os::elapsedTime();
  double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
  _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
  _cur_mark_stop_world_time_ms += elapsed_time_ms;
  _prev_collection_pause_end_ms += elapsed_time_ms;
  _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec);
}

// Add the heap region at the head of the non-incremental collection set
void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
  assert(_inc_cset_build_state == Active, "Precondition");
  assert(hr->is_old(), "the region should be old");

  assert(!hr->in_collection_set(), "should not already be in the CSet");
  _g1->register_old_region_with_cset(hr);
  hr->set_next_in_collection_set(_collection_set);
  _collection_set = hr;
  _collection_set_bytes_used_before += hr->used();
  size_t rs_length = hr->rem_set()->occupied();
  _recorded_rs_lengths += rs_length;
  _old_cset_region_length += 1;
}

// Initialize the per-collection-set information
void G1CollectorPolicy::start_incremental_cset_building() {
  assert(_inc_cset_build_state == Inactive, "Precondition");

  _inc_cset_head = NULL;
  _inc_cset_tail = NULL;
  _inc_cset_bytes_used_before = 0;

  _inc_cset_max_finger = 0;
  _inc_cset_recorded_rs_lengths = 0;
  _inc_cset_recorded_rs_lengths_diffs = 0;
  _inc_cset_predicted_elapsed_time_ms = 0.0;
  _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
  _inc_cset_build_state = Active;
}

void G1CollectorPolicy::finalize_incremental_cset_building() {
  assert(_inc_cset_build_state == Active, "Precondition");
  assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");

  // The two "main" fields, _inc_cset_recorded_rs_lengths and
  // _inc_cset_predicted_elapsed_time_ms, are updated by the thread
  // that adds a new region to the CSet. Further updates by the
  // concurrent refinement thread that samples the young RSet lengths
  // are accumulated in the *_diffs fields. Here we add the diffs to
  // the "main" fields.

  if (_inc_cset_recorded_rs_lengths_diffs >= 0) {
    _inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs;
  } else {
    // This is defensive. The diff should in theory always be positive
    // as RSets can only grow between GCs. However, given that we
    // sample their size concurrently with other threads updating them,
    // it's possible that we might get the wrong size back, which
    // could make the calculations somewhat inaccurate.
    size_t diffs = (size_t) (-_inc_cset_recorded_rs_lengths_diffs);
    if (_inc_cset_recorded_rs_lengths >= diffs) {
      _inc_cset_recorded_rs_lengths -= diffs;
    } else {
      _inc_cset_recorded_rs_lengths = 0;
    }
  }
  _inc_cset_predicted_elapsed_time_ms +=
                                     _inc_cset_predicted_elapsed_time_ms_diffs;

  _inc_cset_recorded_rs_lengths_diffs = 0;
  _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
}

void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
  // This routine is used when:
  // * adding survivor regions to the incremental cset at the end of an
  //   evacuation pause,
  // * adding the current allocation region to the incremental cset
  //   when it is retired, and
  // * updating existing policy information for a region in the
  //   incremental cset via young list RSet sampling.
  // Therefore this routine may be called at a safepoint by the
  // VM thread, or in-between safepoints by mutator threads (when
  // retiring the current allocation region) or a concurrent
  // refine thread (RSet sampling).

  double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
  size_t used_bytes = hr->used();
  _inc_cset_recorded_rs_lengths += rs_length;
  _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
  _inc_cset_bytes_used_before += used_bytes;

  // Cache the values we have added to the aggregated information
  // in the heap region in case we have to remove this region from
  // the incremental collection set, or it is updated by the
  // rset sampling code
  hr->set_recorded_rs_length(rs_length);
  hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
}

void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
                                                     size_t new_rs_length) {
  // Update the CSet information that is dependent on the new RS length
  assert(hr->is_young(), "Precondition");
  assert(!SafepointSynchronize::is_at_safepoint(),
                                               "should not be at a safepoint");

  // We could have updated _inc_cset_recorded_rs_lengths and
  // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do
  // that atomically, as this code is executed by a concurrent
  // refinement thread, potentially concurrently with a mutator thread
  // allocating a new region and also updating the same fields. To
  // avoid the atomic operations we accumulate these updates on two
  // separate fields (*_diffs) and we'll just add them to the "main"
  // fields at the start of a GC.
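  //
  // Illustrative example: if sampling sees a young RSet grow from 100 to
  // 120 entries, +20 is accumulated here and folded into the "main" field
  // by finalize_incremental_cset_building() at the start of the next GC.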

  ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
  ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
  _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;

  double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
  double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
  double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
  _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;

  hr->set_recorded_rs_length(new_rs_length);
  hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
}

void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
  assert(hr->is_young(), "invariant");
  assert(hr->young_index_in_cset() > -1, "should have already been set");
  assert(_inc_cset_build_state == Active, "Precondition");

  // We need to set the cached recorded collection set information in
  // the heap region here (before the region gets added to the
  // collection set). An individual heap region's cached values
  // are calculated, aggregated with the policy collection set info,
  // and cached in the heap region here (initially) and (subsequently)
  // by the Young List sampling code.

  size_t rs_length = hr->rem_set()->occupied();
  add_to_incremental_cset_info(hr, rs_length);

  HeapWord* hr_end = hr->end();
  _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);

  assert(!hr->in_collection_set(), "invariant");
  _g1->register_young_region_with_cset(hr);
  assert(hr->next_in_collection_set() == NULL, "invariant");
}

// Add the region at the RHS of the incremental cset
void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
  // We should only ever be appending survivors at the end of a pause
  assert(hr->is_survivor(), "Logic");

  // Do the 'common' stuff
  add_region_to_incremental_cset_common(hr);

  // Now add the region at the right hand side
  if (_inc_cset_tail == NULL) {
    assert(_inc_cset_head == NULL, "invariant");
    _inc_cset_head = hr;
  } else {
    _inc_cset_tail->set_next_in_collection_set(hr);
  }
  _inc_cset_tail = hr;
}

// Add the region to the LHS of the incremental cset
void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
  // Survivors should be added to the RHS at the end of a pause
  assert(hr->is_eden(), "Logic");

  // Do the 'common' stuff
  add_region_to_incremental_cset_common(hr);

  // Add the region at the left hand side
  hr->set_next_in_collection_set(_inc_cset_head);
  if (_inc_cset_head == NULL) {
    assert(_inc_cset_tail == NULL, "Invariant");
    _inc_cset_tail = hr;
  }
  _inc_cset_head = hr;
}

#ifndef PRODUCT
void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
  assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");

  st->print_cr("\nCollection_set:");
  HeapRegion* csr = list_head;
  while (csr != NULL) {
    HeapRegion* next = csr->next_in_collection_set();
    assert(csr->in_collection_set(), "bad CS");
    st->print_cr("  " HR_FORMAT ", P: " PTR_FORMAT ", N: " PTR_FORMAT ", age: %4d",
                 HR_FORMAT_PARAMS(csr),
                 p2i(csr->prev_top_at_mark_start()), p2i(csr->next_top_at_mark_start()),
                 csr->age_in_surv_rate_group_cond());
    csr = next;
  }
}
#endif // !PRODUCT

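// Illustrative example: 200 MB still reclaimable in a 2 GB heap is ~9.8%;
// with G1HeapWastePercent == 5 that would keep mixed GCs worthwhile.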
double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) const {
  // Returns the given amount of reclaimable bytes (that represents
  // the amount of reclaimable space still to be collected) as a
  // percentage of the current heap capacity.
  size_t capacity_bytes = _g1->capacity();
  return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
}

bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
                                                const char* false_action_str) const {
  CollectionSetChooser* cset_chooser = _collectionSetChooser;
  if (cset_chooser->is_empty()) {
    ergo_verbose0(ErgoMixedGCs,
                  false_action_str,
                  ergo_format_reason("candidate old regions not available"));
    return false;
  }

  // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
  size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
  double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
  double threshold = (double) G1HeapWastePercent;
  if (reclaimable_perc <= threshold) {
    ergo_verbose4(ErgoMixedGCs,
                  false_action_str,
                  ergo_format_reason("reclaimable percentage not over threshold")
                  ergo_format_region("candidate old regions")
                  ergo_format_byte_perc("reclaimable")
                  ergo_format_perc("threshold"),
                  cset_chooser->remaining_regions(),
                  reclaimable_bytes,
                  reclaimable_perc, threshold);
    return false;
  }

  ergo_verbose4(ErgoMixedGCs,
                true_action_str,
                ergo_format_reason("candidate old regions available")
                ergo_format_region("candidate old regions")
                ergo_format_byte_perc("reclaimable")
                ergo_format_perc("threshold"),
                cset_chooser->remaining_regions(),
                reclaimable_bytes,
                reclaimable_perc, threshold);
  return true;
}

uint G1CollectorPolicy::calc_min_old_cset_length() const {
  // The min old CSet region bound is based on the maximum desired
  // number of mixed GCs after a cycle. I.e., even if some old regions
  // look expensive, we should add them to the CSet anyway to make
  // sure we go through the available old regions in no more than the
  // maximum desired number of mixed GCs.
  //
  // The calculation is based on the number of marked regions we added
  // to the CSet chooser in the first place, not how many remain, so
  // that the result is the same during all mixed GCs that follow a cycle.

  const size_t region_num = (size_t) _collectionSetChooser->length();
  const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
  size_t result = region_num / gc_num;
  // emulate ceiling
  if (result * gc_num < region_num) {
    result += 1;
  }
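  // Illustrative example: 100 candidate regions with
  // G1MixedGCCountTarget == 8 give ceil(100 / 8) == 13 regions minimum.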
  return (uint) result;
}

uint G1CollectorPolicy::calc_max_old_cset_length() const {
  // The max old CSet region bound is based on the threshold expressed
  // as a percentage of the heap size. I.e., it should bound the
  // number of old regions added to the CSet irrespective of how many
  // of them are available.

  const G1CollectedHeap* g1h = G1CollectedHeap::heap();
  const size_t region_num = g1h->num_regions();
  const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
  size_t result = region_num * perc / 100;
  // emulate ceiling
  if (100 * result < region_num * perc) {
    result += 1;
  }
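  // Illustrative example: a 2048-region heap with
  // G1OldCSetRegionThresholdPercent == 10 caps the old CSet at
  // ceil(204.8) == 205 regions.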
  return (uint) result;
}

double G1CollectorPolicy::finalize_young_cset_part(double target_pause_time_ms) {
  double young_start_time_sec = os::elapsedTime();

  YoungList* young_list = _g1->young_list();
  finalize_incremental_cset_building();

  guarantee(target_pause_time_ms > 0.0,
            "target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms);
  guarantee(_collection_set == NULL, "Precondition");

  double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
  double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);
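  // Illustrative example: with a 200 ms pause target and a predicted 60 ms
  // base time, 140 ms remain for adding regions to the CSet.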

  ergo_verbose4(ErgoCSetConstruction | ErgoHigh,
                "start choosing CSet",
                ergo_format_size("_pending_cards")
                ergo_format_ms("predicted base time")
                ergo_format_ms("remaining time")
                ergo_format_ms("target pause time"),
                _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);

  collector_state()->set_last_gc_was_young(collector_state()->gcs_are_young());

  if (collector_state()->last_gc_was_young()) {
    _trace_young_gen_time_data.increment_young_collection_count();
  } else {
    _trace_young_gen_time_data.increment_mixed_collection_count();
  }

  // The young list is laid out with the survivor regions from the
  // previous pause appended to its RHS, i.e.
  //   [Newly Young Regions ++ Survivors from last pause].

  uint survivor_region_length = young_list->survivor_length();
  uint eden_region_length = young_list->eden_length();
  init_cset_region_lengths(eden_region_length, survivor_region_length);

  HeapRegion* hr = young_list->first_survivor_region();
  while (hr != NULL) {
    assert(hr->is_survivor(), "badly formed young list");
    // There is a convention that all the young regions in the CSet
    // are tagged as "eden", so we do this for the survivors here. We
    // use the special set_eden_pre_gc() as it doesn't check that the
    // region is free (which is not the case here).
    hr->set_eden_pre_gc();
    hr = hr->get_next_young_region();
  }

  // Clear the fields that point to the survivor list - they are all young now.
  young_list->clear_survivors();

  _collection_set = _inc_cset_head;
  _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
  time_remaining_ms = MAX2(time_remaining_ms - _inc_cset_predicted_elapsed_time_ms, 0.0);

  ergo_verbose4(ErgoCSetConstruction | ErgoHigh,
                "add young regions to CSet",
                ergo_format_region("eden")
                ergo_format_region("survivors")
                ergo_format_ms("predicted young region time")
                ergo_format_ms("target pause time"),
                eden_region_length, survivor_region_length,
                _inc_cset_predicted_elapsed_time_ms,
                target_pause_time_ms);

  // The number of recorded young regions is the incremental
  // collection set's current size
  set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);

  double young_end_time_sec = os::elapsedTime();
  phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);

  return time_remaining_ms;
}

void G1CollectorPolicy::finalize_old_cset_part(double time_remaining_ms) {
  double non_young_start_time_sec = os::elapsedTime();
  double predicted_old_time_ms = 0.0;

  if (!collector_state()->gcs_are_young()) {
    CollectionSetChooser* cset_chooser = _collectionSetChooser;
    cset_chooser->verify();
    const uint min_old_cset_length = calc_min_old_cset_length();
    const uint max_old_cset_length = calc_max_old_cset_length();

    uint expensive_region_num = 0;
    bool check_time_remaining = adaptive_young_list_length();

    HeapRegion* hr = cset_chooser->peek();
    while (hr != NULL) {
      if (old_cset_region_length() >= max_old_cset_length) {
        // Added maximum number of old regions to the CSet.
        ergo_verbose2(ErgoCSetConstruction,
                      "finish adding old regions to CSet",
                      ergo_format_reason("old CSet region num reached max")
                      ergo_format_region("old")
                      ergo_format_region("max"),
                      old_cset_region_length(), max_old_cset_length);
        break;
      }

      // Stop adding regions if the remaining reclaimable space is
      // not above G1HeapWastePercent.
      size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
      double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
      double threshold = (double) G1HeapWastePercent;
      if (reclaimable_perc <= threshold) {
        // We've added enough old regions that the amount of uncollected
        // reclaimable space is at or below the waste threshold. Stop
        // adding old regions to the CSet.
        ergo_verbose5(ErgoCSetConstruction,
                      "finish adding old regions to CSet",
                      ergo_format_reason("reclaimable percentage not over threshold")
                      ergo_format_region("old")
                      ergo_format_region("max")
                      ergo_format_byte_perc("reclaimable")
                      ergo_format_perc("threshold"),
                      old_cset_region_length(),
                      max_old_cset_length,
                      reclaimable_bytes,
                      reclaimable_perc, threshold);
        break;
      }

      double predicted_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
      if (check_time_remaining) {
        if (predicted_time_ms > time_remaining_ms) {
          // Too expensive for the current CSet.

          if (old_cset_region_length() >= min_old_cset_length) {
            // We have added the minimum number of old regions to the CSet,
            // we are done with this CSet.
            ergo_verbose4(ErgoCSetConstruction,
                          "finish adding old regions to CSet",
                          ergo_format_reason("predicted time is too high")
                          ergo_format_ms("predicted time")
                          ergo_format_ms("remaining time")
                          ergo_format_region("old")
                          ergo_format_region("min"),
                          predicted_time_ms, time_remaining_ms,
                          old_cset_region_length(), min_old_cset_length);
            break;
          }

          // We'll add it anyway given that we haven't reached the
          // minimum number of old regions.
          expensive_region_num += 1;
        }
      } else {
        if (old_cset_region_length() >= min_old_cset_length) {
          // In the non-auto-tuning case, we'll finish adding regions
          // to the CSet if we reach the minimum.
          ergo_verbose2(ErgoCSetConstruction,
                        "finish adding old regions to CSet",
                        ergo_format_reason("old CSet region num reached min")
                        ergo_format_region("old")
                        ergo_format_region("min"),
                        old_cset_region_length(), min_old_cset_length);
          break;
        }
      }

      // We will add this region to the CSet.
      time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
      predicted_old_time_ms += predicted_time_ms;
      cset_chooser->pop(); // already have region via peek()
      _g1->old_set_remove(hr);
      add_old_region_to_cset(hr);

      hr = cset_chooser->peek();
    }
    if (hr == NULL) {
      ergo_verbose0(ErgoCSetConstruction,
                    "finish adding old regions to CSet",
                    ergo_format_reason("candidate old regions not available"));
    }

    if (expensive_region_num > 0) {
      // We print the information once here at the end, predicated on
      // whether we added any apparently expensive regions or not, to
      // avoid generating output per region.
      ergo_verbose4(ErgoCSetConstruction,
                    "added expensive regions to CSet",
                    ergo_format_reason("old CSet region num not reached min")
                    ergo_format_region("old")
                    ergo_format_region("expensive")
                    ergo_format_region("min")
                    ergo_format_ms("remaining time"),
                    old_cset_region_length(),
                    expensive_region_num,
                    min_old_cset_length,
                    time_remaining_ms);
    }

    cset_chooser->verify();
  }

  stop_incremental_cset_building();

  ergo_verbose3(ErgoCSetConstruction,
                "finish choosing CSet",
                ergo_format_region("old")
                ergo_format_ms("predicted old region time")
                ergo_format_ms("time remaining"),
                old_cset_region_length(),
                predicted_old_time_ms, time_remaining_ms);

  double non_young_end_time_sec = os::elapsedTime();
  phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
}

void TraceYoungGenTimeData::record_start_collection(double time_to_stop_the_world_ms) {
  if (TraceYoungGenTime) {
    _all_stop_world_times_ms.add(time_to_stop_the_world_ms);
  }
}

void TraceYoungGenTimeData::record_yield_time(double yield_time_ms) {
  if (TraceYoungGenTime) {
    _all_yield_times_ms.add(yield_time_ms);
  }
}

void TraceYoungGenTimeData::record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times) {
  if (TraceYoungGenTime) {
    _total.add(pause_time_ms);
    _other.add(pause_time_ms - phase_times->accounted_time_ms());
    _root_region_scan_wait.add(phase_times->root_region_scan_wait_time_ms());
    _parallel.add(phase_times->cur_collection_par_time_ms());
    _ext_root_scan.add(phase_times->average_time_ms(G1GCPhaseTimes::ExtRootScan));
    _satb_filtering.add(phase_times->average_time_ms(G1GCPhaseTimes::SATBFiltering));
    _update_rs.add(phase_times->average_time_ms(G1GCPhaseTimes::UpdateRS));
    _scan_rs.add(phase_times->average_time_ms(G1GCPhaseTimes::ScanRS));
    _obj_copy.add(phase_times->average_time_ms(G1GCPhaseTimes::ObjCopy));
    _termination.add(phase_times->average_time_ms(G1GCPhaseTimes::Termination));

    double parallel_known_time = phase_times->average_time_ms(G1GCPhaseTimes::ExtRootScan) +
      phase_times->average_time_ms(G1GCPhaseTimes::SATBFiltering) +
      phase_times->average_time_ms(G1GCPhaseTimes::UpdateRS) +
      phase_times->average_time_ms(G1GCPhaseTimes::ScanRS) +
      phase_times->average_time_ms(G1GCPhaseTimes::ObjCopy) +
      phase_times->average_time_ms(G1GCPhaseTimes::Termination);

    double parallel_other_time = phase_times->cur_collection_par_time_ms() - parallel_known_time;
    _parallel_other.add(parallel_other_time);
    _clear_ct.add(phase_times->cur_clear_ct_time_ms());
  }
}

void TraceYoungGenTimeData::increment_young_collection_count() {
  if (TraceYoungGenTime) {
    ++_young_pause_num;
  }
}

void TraceYoungGenTimeData::increment_mixed_collection_count() {
  if (TraceYoungGenTime) {
    ++_mixed_pause_num;
  }
}

void TraceYoungGenTimeData::print_summary(const char* str,
                                          const NumberSeq* seq) const {
  double sum = seq->sum();
  gclog_or_tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)",
                str, sum / 1000.0, seq->avg());
}

void TraceYoungGenTimeData::print_summary_sd(const char* str,
                                             const NumberSeq* seq) const {
  print_summary(str, seq);
  gclog_or_tty->print_cr("%45s = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
                "(num", seq->num(), seq->sd(), seq->maximum());
}

void TraceYoungGenTimeData::print() const {
  if (!TraceYoungGenTime) {
    return;
  }

  gclog_or_tty->print_cr("ALL PAUSES");
  print_summary_sd("   Total", &_total);
  gclog_or_tty->cr();
  gclog_or_tty->cr();
  gclog_or_tty->print_cr("   Young GC Pauses: %8d", _young_pause_num);
  gclog_or_tty->print_cr("   Mixed GC Pauses: %8d", _mixed_pause_num);
  gclog_or_tty->cr();

  gclog_or_tty->print_cr("EVACUATION PAUSES");

  if (_young_pause_num == 0 && _mixed_pause_num == 0) {
    gclog_or_tty->print_cr("none");
  } else {
    print_summary_sd("   Evacuation Pauses", &_total);
    print_summary("      Root Region Scan Wait", &_root_region_scan_wait);
    print_summary("      Parallel Time", &_parallel);
    print_summary("         Ext Root Scanning", &_ext_root_scan);
    print_summary("         SATB Filtering", &_satb_filtering);
    print_summary("         Update RS", &_update_rs);
    print_summary("         Scan RS", &_scan_rs);
    print_summary("         Object Copy", &_obj_copy);
    print_summary("         Termination", &_termination);
    print_summary("         Parallel Other", &_parallel_other);
    print_summary("      Clear CT", &_clear_ct);
    print_summary("      Other", &_other);
  }
  gclog_or_tty->cr();

  gclog_or_tty->print_cr("MISC");
  print_summary_sd("   Stop World", &_all_stop_world_times_ms);
  print_summary_sd("   Yields", &_all_yield_times_ms);
}

void TraceOldGenTimeData::record_full_collection(double full_gc_time_ms) {
  if (TraceOldGenTime) {
    _all_full_gc_times.add(full_gc_time_ms);
  }
}

void TraceOldGenTimeData::print() const {
  if (!TraceOldGenTime) {
    return;
  }

  if (_all_full_gc_times.num() > 0) {
    gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
      _all_full_gc_times.num(),
      _all_full_gc_times.sum() / 1000.0);
    gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times.avg());
    gclog_or_tty->print_cr("                     [std. dev = %8.2f ms, max = %8.2f ms]",
      _all_full_gc_times.sd(),
      _all_full_gc_times.maximum());
  }
}