/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "runtime/arguments.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/debug.hpp"

#define PREDICTIONS_VERBOSE 0

// <NEW PREDICTION>

// Different defaults for different numbers of GC threads
// They were chosen by running GCOld and SPECjbb on debris with different
//   numbers of GC threads and choosing them based on the results
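//
// Each defaults array below has eight entries, indexed by GC worker
// thread count: entry 0 covers the serial case and one thread, entry
// N-1 covers N threads, and counts above 8 are clamped to the last
// entry (see the index calculation in the G1CollectorPolicy
// constructor below).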

// all the same
static double rs_length_diff_defaults[] = {
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

static double cost_per_card_ms_defaults[] = {
  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};

// all the same
static double fully_young_cards_per_entry_ratio_defaults[] = {
  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};

static double cost_per_entry_ms_defaults[] = {
  0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};

static double cost_per_byte_ms_defaults[] = {
  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};

// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};


static double young_other_cost_per_region_ms_defaults[] = {
  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};

static double non_young_other_cost_per_region_ms_defaults[] = {
  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};

// </NEW PREDICTION>

// Helper class for avoiding interleaved logging
class LineBuffer: public StackObj {

private:
  static const int BUFFER_LEN = 1024;
  static const int INDENT_CHARS = 3;
  char _buffer[BUFFER_LEN];
  int _indent_level;
  int _cur;

  void vappend(const char* format, va_list ap) {
    int res = vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap);
    // C99 vsnprintf returns the untruncated length on overflow (some
    // platforms return -1 instead), so treat both cases as "buffer full".
    if (res >= 0 && res < BUFFER_LEN - _cur) {
      _cur += res;
    } else {
      DEBUG_ONLY(warning("buffer too small in LineBuffer");)
      _buffer[BUFFER_LEN - 1] = 0;
      _cur = BUFFER_LEN; // vsnprintf above should not add to _buffer if we are called again
    }
  }

public:
  explicit LineBuffer(int indent_level): _indent_level(indent_level), _cur(0) {
    for (; (_cur < BUFFER_LEN && _cur < (_indent_level * INDENT_CHARS)); _cur++) {
      _buffer[_cur] = ' ';
    }
  }

#ifndef PRODUCT
  ~LineBuffer() {
    assert(_cur == _indent_level * INDENT_CHARS, "pending data in buffer - append_and_print_cr() not called?");
  }
#endif

  void append(const char* format, ...) {
    va_list ap;
    va_start(ap, format);
    vappend(format, ap);
    va_end(ap);
  }

  void append_and_print_cr(const char* format, ...) {
    va_list ap;
    va_start(ap, format);
    vappend(format, ap);
    va_end(ap);
    gclog_or_tty->print_cr("%s", _buffer);
    _cur = _indent_level * INDENT_CHARS;
  }
};
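// Illustrative use of LineBuffer (not a call site from this file):
//
//   LineBuffer buf(2);                       // indent two levels (6 spaces)
//   buf.append("[%s (ms):", "Update RS");    // build the line piecewise
//   buf.append_and_print_cr(" %5.1lf]", t);  // emit it as one atomic line
//
// Building a whole line before printing is what keeps the output of
// concurrent GC threads from interleaving mid-line.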

G1CollectorPolicy::G1CollectorPolicy() :
  _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
                        ? ParallelGCThreads : 1),

  _n_pauses(0),
  _recent_rs_scan_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_pause_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_rs_sizes(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _all_pause_times_ms(new NumberSeq()),
  _stop_world_start(0.0),
  _all_stop_world_times_ms(new NumberSeq()),
  _all_yield_times_ms(new NumberSeq()),

  _all_mod_union_times_ms(new NumberSeq()),

  _summary(new Summary()),

#ifndef PRODUCT
  _cur_clear_ct_time_ms(0.0),
  _min_clear_cc_time_ms(-1.0),
  _max_clear_cc_time_ms(-1.0),
  _cur_clear_cc_time_ms(0.0),
  _cum_clear_cc_time_ms(0.0),
  _num_cc_clears(0L),
#endif

  _region_num_young(0),
  _region_num_tenured(0),
  _prev_region_num_young(0),
  _prev_region_num_tenured(0),

  _aux_num(10),
  _all_aux_times_ms(new NumberSeq[_aux_num]),
  _cur_aux_start_times_ms(new double[_aux_num]),
  _cur_aux_times_ms(new double[_aux_num]),
  _cur_aux_times_set(new bool[_aux_num]),

  _concurrent_mark_init_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),

  // <NEW PREDICTION>

  _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _prev_collection_pause_end_ms(0.0),
  _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _fully_young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _partially_young_cards_per_entry_ratio_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _partially_young_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
  _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _non_young_other_cost_per_region_ms_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),

  _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _scanned_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),

  _pause_time_target_ms((double) MaxGCPauseMillis),

  // </NEW PREDICTION>

  _full_young_gcs(true),
  _full_young_pause_num(0),
  _partial_young_pause_num(0),

  _during_marking(false),
  _in_marking_window(false),
  _in_marking_window_im(false),

  _known_garbage_ratio(0.0),
  _known_garbage_bytes(0),

  _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),

  _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_CS_bytes_used_before(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_CS_bytes_surviving(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_avg_pause_time_ratio(0.0),
  _num_markings(0),
  _n_marks(0),
  _n_pauses_at_mark_end(0),

  _all_full_gc_times_ms(new NumberSeq()),

  // G1PausesBtwnConcMark defaults to -1
  // so the hack is to do the cast  QQQ FIXME
  _pauses_btwn_concurrent_mark((size_t)G1PausesBtwnConcMark),
  _n_marks_since_last_pause(0),
  _initiate_conc_mark_if_possible(false),
  _during_initial_mark_pause(false),
  _should_revert_to_full_young_gcs(false),
  _last_full_young_gc(false),

  _eden_bytes_before_gc(0),
  _survivor_bytes_before_gc(0),
  _capacity_before_gc(0),

  _prev_collection_pause_used_at_end_bytes(0),

  _collection_set(NULL),
  _collection_set_size(0),
  _collection_set_bytes_used_before(0),

  // Incremental CSet attributes
  _inc_cset_build_state(Inactive),
  _inc_cset_head(NULL),
  _inc_cset_tail(NULL),
  _inc_cset_size(0),
  _inc_cset_young_index(0),
  _inc_cset_bytes_used_before(0),
  _inc_cset_max_finger(NULL),
  _inc_cset_recorded_young_bytes(0),
  _inc_cset_recorded_rs_lengths(0),
  _inc_cset_predicted_elapsed_time_ms(0.0),
  _inc_cset_predicted_bytes_to_copy(0),

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

  _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
                                                 G1YoungSurvRateNumRegionsSummary)),
  _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
                                              G1YoungSurvRateNumRegionsSummary)),
  // add here any more surv rate groups
  _recorded_survivor_regions(0),
  _recorded_survivor_head(NULL),
  _recorded_survivor_tail(NULL),
  _survivors_age_table(true),

  _gc_overhead_perc(0.0)

{
  // Set up the region size and associated fields. Given that the
  // policy is created before the heap, we have to set this up here,
  // so it's done as soon as possible.
  HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
  HeapRegionRemSet::setup_remset_size();

  // Verify PLAB sizes
  const uint region_size = HeapRegion::GrainWords;
  if (YoungPLABSize > region_size || OldPLABSize > region_size) {
    char buffer[128];
    jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most %u",
                 OldPLABSize > region_size ? "Old" : "Young", region_size);
    vm_exit_during_initialization(buffer);
  }

  _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
  _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;

  _par_last_gc_worker_start_times_ms = new double[_parallel_gc_threads];
  _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads];
  _par_last_mark_stack_scan_times_ms = new double[_parallel_gc_threads];

  _par_last_update_rs_times_ms = new double[_parallel_gc_threads];
  _par_last_update_rs_processed_buffers = new double[_parallel_gc_threads];

  _par_last_scan_rs_times_ms = new double[_parallel_gc_threads];

  _par_last_obj_copy_times_ms = new double[_parallel_gc_threads];

  _par_last_termination_times_ms = new double[_parallel_gc_threads];
  _par_last_termination_attempts = new double[_parallel_gc_threads];
  _par_last_gc_worker_end_times_ms = new double[_parallel_gc_threads];
  _par_last_gc_worker_times_ms = new double[_parallel_gc_threads];

  // start conservatively
  _expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis;

  // <NEW PREDICTION>

  int index;
  if (ParallelGCThreads == 0)
    index = 0;
  else if (ParallelGCThreads > 8)
    index = 7;
  else
    index = ParallelGCThreads - 1;

  _pending_card_diff_seq->add(0.0);
  _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
  _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
  _fully_young_cards_per_entry_ratio_seq->add(
                            fully_young_cards_per_entry_ratio_defaults[index]);
  _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
  _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
  _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
  _young_other_cost_per_region_ms_seq->add(
                               young_other_cost_per_region_ms_defaults[index]);
  _non_young_other_cost_per_region_ms_seq->add(
                           non_young_other_cost_per_region_ms_defaults[index]);

  // </NEW PREDICTION>

  // Below, we might need to calculate the pause time target based on
  // the pause interval. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
  // arrange for the pause interval to be pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.
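  //
  // For example, with the default MaxGCPauseMillis of 200 and no
  // explicit GCPauseIntervalMillis, the code below leaves the pause
  // time target at 200ms and sets the interval to 201ms.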

  // First make sure that, if either parameter is set, its value is
  // reasonable.
  if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (MaxGCPauseMillis < 1) {
      vm_exit_during_initialization("MaxGCPauseMillis should be "
                                    "greater than 0");
    }
  }
  if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    if (GCPauseIntervalMillis < 1) {
      vm_exit_during_initialization("GCPauseIntervalMillis should be "
                                    "greater than 0");
    }
  }

  // Then, if the pause time target parameter was not set, set it to
  // the default value.
  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
      // The default pause time target in G1 is 200ms
      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
    } else {
      // We do not allow the pause interval to be set without the
      // pause time target
      vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
                                    "without setting MaxGCPauseMillis");
    }
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }

  // Finally, make sure that the two parameters are consistent.
  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
    char buffer[256];
    jio_snprintf(buffer, 256,
                 "MaxGCPauseMillis (%u) should be less than "
                 "GCPauseIntervalMillis (%u)",
                 MaxGCPauseMillis, GCPauseIntervalMillis);
    vm_exit_during_initialization(buffer);
  }

  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
  double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
  _sigma = (double) G1ConfidencePercent / 100.0;

  // start conservatively (around 50ms is about right)
  _concurrent_mark_init_times_ms->add(0.05);
  _concurrent_mark_remark_times_ms->add(0.05);
  _concurrent_mark_cleanup_times_ms->add(0.20);
  _tenuring_threshold = MaxTenuringThreshold;

  // If G1FixedSurvivorSpaceSize is 0, the survivor space size is not
  // fixed and _max_survivor_regions will be calculated by
  // calculate_young_list_target_length() during initialization.
  _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;

  assert(GCTimeRatio > 0,
         "we should have set it to a default value in set_g1_gc_flags() "
         "if a user set it to 0");
  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
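  // For example, GCTimeRatio == 9 yields a 10% GC overhead target
  // (100 * 1 / (1 + 9)); GCTimeRatio == 99 would yield 1%.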

  initialize_all();
}

// Increment "i", mod "len"
static void inc_mod(int& i, int len) {
  i++; if (i == len) i = 0;
}

void G1CollectorPolicy::initialize_flags() {
  set_min_alignment(HeapRegion::GrainBytes);
  set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
  if (SurvivorRatio < 1) {
    vm_exit_during_initialization("Invalid survivor ratio specified");
  }
  CollectorPolicy::initialize_flags();
}

// The easiest way to deal with the parsing of the NewSize /
// MaxNewSize / etc. parameters is to re-use the code in the
// TwoGenerationCollectorPolicy class. This is similar to what
// ParallelScavenge does with its GenerationSizer class (see
// ParallelScavengeHeap::initialize()). We might change this in the
// future, but it's a good start.
class G1YoungGenSizer : public TwoGenerationCollectorPolicy {
  size_t size_to_region_num(size_t byte_size) {
    return MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes);
  }

public:
  G1YoungGenSizer() {
    initialize_flags();
    initialize_size_info();
  }

  size_t min_young_region_num() {
    return size_to_region_num(_min_gen0_size);
  }
  size_t initial_young_region_num() {
    return size_to_region_num(_initial_gen0_size);
  }
  size_t max_young_region_num() {
    return size_to_region_num(_max_gen0_size);
  }
};
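// G1YoungGenSizer is used in G1CollectorPolicy::init() below to derive
// the fixed young list length when adaptive young list sizing is
// disabled.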

void G1CollectorPolicy::init() {
  // Set aside an initial future to_space.
  _g1 = G1CollectedHeap::heap();

  assert(Heap_lock->owned_by_self(), "Locking discipline.");

  initialize_gc_policy_counters();

  G1YoungGenSizer sizer;
  size_t initial_region_num = sizer.initial_young_region_num();

  if (UseAdaptiveSizePolicy) {
    set_adaptive_young_list_length(true);
    _young_list_fixed_length = 0;
  } else {
    set_adaptive_young_list_length(false);
    _young_list_fixed_length = initial_region_num;
  }
  _free_regions_at_end_of_collection = _g1->free_regions();
  calculate_young_list_min_length();
  guarantee( _young_list_min_length == 0, "invariant, not enough info" );
  calculate_young_list_target_length();

  // We may immediately start allocating regions and placing them on the
  // collection set list. Initialize the per-collection set info
  start_incremental_cset_building();
}

// Create the jstat counters for the policy.
void G1CollectorPolicy::initialize_gc_policy_counters()
{
  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
}

void G1CollectorPolicy::calculate_young_list_min_length() {
  _young_list_min_length = 0;

  if (!adaptive_young_list_length())
    return;

  if (_alloc_rate_ms_seq->num() > 3) {
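    // Estimate how many regions the application will allocate between
    // now and the earliest time at which the MMU tracker will allow
    // the next pause, and require the young list to absorb them on
    // top of the regions it already holds.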
    double now_sec = os::elapsedTime();
    double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
    double alloc_rate_ms = predict_alloc_rate_ms();
    size_t min_regions = (size_t) ceil(alloc_rate_ms * when_ms);
    size_t current_region_num = _g1->young_list()->length();
    _young_list_min_length = min_regions + current_region_num;
  }
}

void G1CollectorPolicy::calculate_young_list_target_length() {
  if (adaptive_young_list_length()) {
    size_t rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
    calculate_young_list_target_length(rs_lengths);
  } else {
    if (full_young_gcs())
      _young_list_target_length = _young_list_fixed_length;
    else
      _young_list_target_length = _young_list_fixed_length / 2;
  }

  // Make sure we allow the application to allocate at least one
  // region before we need to do a collection again.
  size_t min_length = _g1->young_list()->length() + 1;
  _young_list_target_length = MAX2(_young_list_target_length, min_length);
  calculate_max_gc_locker_expansion();
  calculate_survivors_policy();
}

void G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths) {
  guarantee( adaptive_young_list_length(), "pre-condition" );
  guarantee( !_in_marking_window || !_last_full_young_gc, "invariant" );

  double start_time_sec = os::elapsedTime();
  size_t min_reserve_perc = MAX2((size_t)2, (size_t)G1ReservePercent);
  min_reserve_perc = MIN2((size_t) 50, min_reserve_perc);
  size_t reserve_regions =
    (size_t) ((double) min_reserve_perc * (double) _g1->n_regions() / 100.0);

  if (full_young_gcs() && _free_regions_at_end_of_collection > 0) {
    // we are in fully-young mode and there are free regions in the heap

    double survivor_regions_evac_time =
        predict_survivor_regions_evac_time();

    double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
    size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
    size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
    size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
    double base_time_ms = predict_base_elapsed_time_ms(pending_cards, scanned_cards)
                          + survivor_regions_evac_time;

    // the result
    size_t final_young_length = 0;

    // If reserve_regions exceeds the free region count, the unsigned
    // subtraction below would wrap around, so clamp at zero explicitly
    // (MAX2 with 0 cannot catch this for unsigned types).
    size_t init_free_regions =
      (_free_regions_at_end_of_collection > reserve_regions)
        ? _free_regions_at_end_of_collection - reserve_regions : 0;

    // if we're still under the pause target...
    if (base_time_ms <= target_pause_time_ms) {
      // We make sure that the shortest young length that makes sense
      // fits within the target pause time.
      size_t min_young_length = 1;

      if (predict_will_fit(min_young_length, base_time_ms,
                                     init_free_regions, target_pause_time_ms)) {
        // The shortest young length will fit within the target pause time;
        // we'll now check whether the absolute maximum number of young
        // regions will fit in the target pause time. If not, we'll do
        // a binary search between min_young_length and max_young_length
        size_t abs_max_young_length = _free_regions_at_end_of_collection - 1;
        size_t max_young_length = abs_max_young_length;

        if (max_young_length > min_young_length) {
          // Let's check if the initial max young length will fit within the
          // target pause. If so then there is no need to search for a maximal
          // young length - we'll return the initial maximum

          if (predict_will_fit(max_young_length, base_time_ms,
                                init_free_regions, target_pause_time_ms)) {
            // The maximum young length will satisfy the target pause time.
            // We are done so set min young length to this maximum length.
            // The code after the loop will then set final_young_length using
            // the value cached in the minimum length.
            min_young_length = max_young_length;
          } else {
            // The maximum possible number of young regions will not fit within
            // the target pause time so let's search....

            size_t diff = (max_young_length - min_young_length) / 2;
            max_young_length = min_young_length + diff;

            while (max_young_length > min_young_length) {
              if (predict_will_fit(max_young_length, base_time_ms,
                                        init_free_regions, target_pause_time_ms)) {

                // The current max young length will fit within the target
                // pause time. Note we do not exit the loop here. Setting
                // min = max and then increasing max below means that we
                // will continue searching for an upper bound in the
                // range [max..max+diff]
                min_young_length = max_young_length;
              }
              diff = (max_young_length - min_young_length) / 2;
              max_young_length = min_young_length + diff;
            }
            // the above loop found a maximal young length that will fit
            // within the target pause time.
          }
          assert(min_young_length <= abs_max_young_length, "just checking");
        }
        final_young_length = min_young_length;
      }
    }
    // and we're done!

    // we should have at least one region in the target young length
    _young_list_target_length =
                              final_young_length + _recorded_survivor_regions;

    // let's keep an eye on how long we spend on this calculation
    // right now, I assume that we'll print it when we need it; we
    // should really add it to the breakdown of a pause
    double end_time_sec = os::elapsedTime();
    double elapsed_time_ms = (end_time_sec - start_time_sec) * 1000.0;

#ifdef TRACE_CALC_YOUNG_LENGTH
    // leave this in for debugging, just in case
    gclog_or_tty->print_cr("target = %1.1lf ms, young = " SIZE_FORMAT ", "
                           "elapsed %1.2lf ms, (%s%s) " SIZE_FORMAT SIZE_FORMAT,
                           target_pause_time_ms,
                           _young_list_target_length,
                           elapsed_time_ms,
                           full_young_gcs() ? "full" : "partial",
                           during_initial_mark_pause() ? " i-m" : "",
                           _in_marking_window,
                           _in_marking_window_im);
#endif // TRACE_CALC_YOUNG_LENGTH

    if (_young_list_target_length < _young_list_min_length) {
      // bummer; this means that, if we do a pause when the maximal
      // length dictates, we'll violate the pause spacing target (the
      // min length was calculated based on the application's current
      // alloc rate);

      // so, we have to bite the bullet, and allocate the minimum
      // number. We'll violate our target, but we just can't meet it.

#ifdef TRACE_CALC_YOUNG_LENGTH
      // leave this in for debugging, just in case
      gclog_or_tty->print_cr("adjusted target length from "
                             SIZE_FORMAT " to " SIZE_FORMAT,
                             _young_list_target_length, _young_list_min_length);
#endif // TRACE_CALC_YOUNG_LENGTH

      _young_list_target_length = _young_list_min_length;
    }
  } else {
    // we are in a partially-young mode or we've run out of regions (due
    // to evacuation failure)

#ifdef TRACE_CALC_YOUNG_LENGTH
    // leave this in for debugging, just in case
    gclog_or_tty->print_cr("(partial) setting target to " SIZE_FORMAT,
                           _young_list_min_length);
#endif // TRACE_CALC_YOUNG_LENGTH
    // we'll do the pause as soon as possible by choosing the minimum
    _young_list_target_length = _young_list_min_length;
  }

  _rs_lengths_prediction = rs_lengths;
}
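// Note: the _rs_lengths_prediction cached above is compared against
// freshly sampled RS lengths in check_prediction_validity() below; a
// sample exceeding the prediction triggers a recalculation.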

// This is used by: calculate_young_list_target_length(rs_length). It
// returns true iff:
//   the predicted pause time for the given young list will not overflow
//   the target pause time
// and:
//   the predicted amount of surviving data will not overflow the
//   amount of free space available for survivor regions.
//
bool
G1CollectorPolicy::predict_will_fit(size_t young_length,
                                    double base_time_ms,
                                    size_t init_free_regions,
                                    double target_pause_time_ms) {

  if (young_length >= init_free_regions)
    // end condition 1: not enough space for the young regions
    return false;

  double accum_surv_rate_adj = 0.0;
  double accum_surv_rate =
    accum_yg_surv_rate_pred((int)(young_length - 1)) - accum_surv_rate_adj;

  size_t bytes_to_copy =
    (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);

  double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);

  double young_other_time_ms =
                       predict_young_other_time_ms(young_length);

  double pause_time_ms =
                   base_time_ms + copy_time_ms + young_other_time_ms;

  if (pause_time_ms > target_pause_time_ms)
    // end condition 2: over the target pause time
    return false;

  size_t free_bytes =
                 (init_free_regions - young_length) * HeapRegion::GrainBytes;

  if ((2.0 + sigma()) * (double) bytes_to_copy > (double) free_bytes)
    // end condition 3: out of to-space (conservatively)
    return false;

  // success!
  return true;
}

double G1CollectorPolicy::predict_survivor_regions_evac_time() {
  double survivor_regions_evac_time = 0.0;
  for (HeapRegion * r = _recorded_survivor_head;
       r != NULL && r != _recorded_survivor_tail->get_next_young_region();
       r = r->get_next_young_region()) {
    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, true);
  }
  return survivor_regions_evac_time;
}

void G1CollectorPolicy::check_prediction_validity() {
  guarantee( adaptive_young_list_length(), "should not call this otherwise" );

  size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
  if (rs_lengths > _rs_lengths_prediction) {
    // add 10% to avoid having to recalculate often
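    // (e.g. a sample of 1001 entries against a prediction of 1000 gives
    // a new prediction of 1001 * 1100 / 1000 = 1101)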
    size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
    calculate_young_list_target_length(rs_lengths_prediction);
  }
}

HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
                                               bool is_tlab,
                                               bool* gc_overhead_limit_was_exceeded) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

// This method controls how a collector handles one or more
// of its generations being fully allocated.
HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
                                                       bool is_tlab) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}


#ifndef PRODUCT
bool G1CollectorPolicy::verify_young_ages() {
  HeapRegion* head = _g1->young_list()->first_region();
  return verify_young_ages(head, _short_lived_surv_rate_group);
  // also call verify_young_ages on any additional surv rate groups
}

bool
G1CollectorPolicy::verify_young_ages(HeapRegion* head,
                                     SurvRateGroup *surv_rate_group) {
  guarantee( surv_rate_group != NULL, "pre-condition" );

  const char* name = surv_rate_group->name();
  bool ret = true;
  int prev_age = -1;

  for (HeapRegion* curr = head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    SurvRateGroup* group = curr->surv_rate_group();
    if (group == NULL && !curr->is_survivor()) {
      gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
      ret = false;
    }

    if (surv_rate_group == group) {
      int age = curr->age_in_surv_rate_group();

      if (age < 0) {
        gclog_or_tty->print_cr("## %s: encountered negative age", name);
        ret = false;
      }

      if (age <= prev_age) {
        gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
                               "(%d, %d)", name, age, prev_age);
        ret = false;
      }
      prev_age = age;
    }
  }

  return ret;
}
#endif // PRODUCT

void G1CollectorPolicy::record_full_collection_start() {
  _cur_collection_start_sec = os::elapsedTime();
  // Release the future to-space so that it is available for compaction into.
  _g1->set_full_collection();
}

void G1CollectorPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _cur_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  _all_full_gc_times_ms->add(full_gc_time_ms);

  update_recent_gc_times(end_sec, full_gc_time_ms);

  _g1->clear_full_collection();

  // "Nuke" the heuristics that control the fully/partially young GC
  // transitions and make sure we start with fully young GCs after the
  // Full GC.
  set_full_young_gcs(true);
  _last_full_young_gc = false;
  _should_revert_to_full_young_gcs = false;
  clear_initiate_conc_mark_if_possible();
  clear_during_initial_mark_pause();
  _known_garbage_bytes = 0;
  _known_garbage_ratio = 0.0;
  _in_marking_window = false;
  _in_marking_window_im = false;

  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  record_survivor_regions(0, NULL, NULL);

  _prev_region_num_young   = _region_num_young;
  _prev_region_num_tenured = _region_num_tenured;

  _free_regions_at_end_of_collection = _g1->free_regions();
  // Reset survivors SurvRateGroup.
  _survivor_surv_rate_group->reset();
  calculate_young_list_min_length();
  calculate_young_list_target_length();
}

void G1CollectorPolicy::record_stop_world_start() {
  _stop_world_start = os::elapsedTime();
}

void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
                                                      size_t start_used) {
  if (PrintGCDetails) {
    gclog_or_tty->stamp(PrintGCTimeStamps);
    gclog_or_tty->print("[GC pause");
    gclog_or_tty->print(" (%s)", full_young_gcs() ? "young" : "partial");
  }

  assert(_g1->used() == _g1->recalculate_used(),
         err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
                 _g1->used(), _g1->recalculate_used()));

  double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
  _all_stop_world_times_ms->add(s_w_t_ms);
  _stop_world_start = 0.0;

  _cur_collection_start_sec = start_time_sec;
  _cur_collection_pause_used_at_start_bytes = start_used;
  _cur_collection_pause_used_regions_at_start = _g1->used_regions();
  _pending_cards = _g1->pending_card_num();
  _max_pending_cards = _g1->max_pending_card_num();

  _bytes_in_collection_set_before_gc = 0;
  _bytes_copied_during_gc = 0;

  YoungList* young_list = _g1->young_list();
  _eden_bytes_before_gc = young_list->eden_used_bytes();
  _survivor_bytes_before_gc = young_list->survivor_used_bytes();
  _capacity_before_gc = _g1->capacity();

#ifdef ASSERT
  // initialize these to something well known so that we can spot
  // if they are not set properly

  for (int i = 0; i < _parallel_gc_threads; ++i) {
    _par_last_gc_worker_start_times_ms[i] = -1234.0;
    _par_last_ext_root_scan_times_ms[i] = -1234.0;
    _par_last_mark_stack_scan_times_ms[i] = -1234.0;
    _par_last_update_rs_times_ms[i] = -1234.0;
    _par_last_update_rs_processed_buffers[i] = -1234.0;
    _par_last_scan_rs_times_ms[i] = -1234.0;
    _par_last_obj_copy_times_ms[i] = -1234.0;
    _par_last_termination_times_ms[i] = -1234.0;
    _par_last_termination_attempts[i] = -1234.0;
    _par_last_gc_worker_end_times_ms[i] = -1234.0;
    _par_last_gc_worker_times_ms[i] = -1234.0;
  }
#endif

  for (int i = 0; i < _aux_num; ++i) {
    _cur_aux_times_ms[i] = 0.0;
    _cur_aux_times_set[i] = false;
  }

  _satb_drain_time_set = false;
  _last_satb_drain_processed_buffers = -1;

  _last_young_gc_full = false;

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert( verify_young_ages(), "region age verification" );
}

void G1CollectorPolicy::record_mark_closure_time(double mark_closure_time_ms) {
  _mark_closure_time_ms = mark_closure_time_ms;
}

void G1CollectorPolicy::record_concurrent_mark_init_end_pre(double
                                                   mark_init_elapsed_time_ms) {
  _during_marking = true;
  assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
  clear_during_initial_mark_pause();
  _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
}

void G1CollectorPolicy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
  _during_marking = false;
}

void G1CollectorPolicy::record_concurrent_mark_remark_end() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
  _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
  _cur_mark_stop_world_time_ms += elapsed_time_ms;
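  // Also advance the pause-end baseline so that this stop-the-world
  // interval is not counted as mutator time by the allocation rate
  // calculation in record_collection_pause_end().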
  _prev_collection_pause_end_ms += elapsed_time_ms;

  _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}

void
G1CollectorPolicy::record_concurrent_mark_cleanup_end(size_t freed_bytes,
                                                      size_t max_live_bytes) {
  record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes);
  record_concurrent_mark_cleanup_end_work2();
}

void
G1CollectorPolicy::
record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
                                         size_t max_live_bytes) {
  if (_n_marks < 2) _n_marks++;
  if (G1PolicyVerbose > 0)
    gclog_or_tty->print_cr("At end of marking, max_live is " SIZE_FORMAT " MB "
                           " (of " SIZE_FORMAT " MB heap).",
                           max_live_bytes/M, _g1->capacity()/M);
}

// The important thing about this is that it includes "os::elapsedTime".
void G1CollectorPolicy::record_concurrent_mark_cleanup_end_work2() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_cleanup_start_sec)*1000.0;
  _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
  _cur_mark_stop_world_time_ms += elapsed_time_ms;
  _prev_collection_pause_end_ms += elapsed_time_ms;

  _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_time_sec, true);

  _num_markings++;

  // We did a marking, so reset the "since_last_mark" variables.
  double considerConcMarkCost = 1.0;
  // If there are available processors, concurrent activity is free...
  if (Threads::number_of_non_daemon_threads() * 2 <
      os::active_processor_count()) {
    considerConcMarkCost = 0.0;
  }
  _n_pauses_at_mark_end = _n_pauses;
  _n_marks_since_last_pause++;
}

void
G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
  _should_revert_to_full_young_gcs = false;
  _last_full_young_gc = true;
  _in_marking_window = false;
  if (adaptive_young_list_length())
    calculate_young_list_target_length();
}

void G1CollectorPolicy::record_concurrent_pause() {
  if (_stop_world_start > 0.0) {
    double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
    _all_yield_times_ms->add(yield_ms);
  }
}

void G1CollectorPolicy::record_concurrent_pause_end() {
}

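// sum_of() treats sum_arr as a circular buffer of N entries: it adds up
// the n entries beginning at index start, wrapping modulo N. For example,
// sum_of(a, 6, 4, 8) sums a[6], a[7], a[0] and a[1].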
template<class T>
T sum_of(T* sum_arr, int start, int n, int N) {
  T sum = (T)0;
  for (int i = 0; i < n; i++) {
    int j = (start + i) % N;
    sum += sum_arr[j];
  }
  return sum;
}

void G1CollectorPolicy::print_par_stats(int level,
                                        const char* str,
                                        double* data) {
  double min = data[0], max = data[0];
  double total = 0.0;
  LineBuffer buf(level);
  buf.append("[%s (ms):", str);
  for (uint i = 0; i < ParallelGCThreads; ++i) {
    double val = data[i];
    if (val < min)
      min = val;
    if (val > max)
      max = val;
    total += val;
    buf.append("  %3.1lf", val);
  }
  buf.append_and_print_cr("");
  double avg = total / (double) ParallelGCThreads;
  buf.append_and_print_cr(" Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf, Diff: %5.1lf]",
    avg, min, max, max - min);
}

void G1CollectorPolicy::print_par_sizes(int level,
                                        const char* str,
                                        double* data) {
  double min = data[0], max = data[0];
  double total = 0.0;
  LineBuffer buf(level);
  buf.append("[%s :", str);
  for (uint i = 0; i < ParallelGCThreads; ++i) {
    double val = data[i];
    if (val < min)
      min = val;
    if (val > max)
      max = val;
    total += val;
    buf.append(" %d", (int) val);
  }
  buf.append_and_print_cr("");
  double avg = total / (double) ParallelGCThreads;
  buf.append_and_print_cr(" Sum: %d, Avg: %d, Min: %d, Max: %d, Diff: %d]",
    (int)total, (int)avg, (int)min, (int)max, (int)max - (int)min);
}

void G1CollectorPolicy::print_stats (int level,
                                     const char* str,
                                     double value) {
  LineBuffer(level).append_and_print_cr("[%s: %5.1lf ms]", str, value);
}

void G1CollectorPolicy::print_stats (int level,
                                     const char* str,
                                     int value) {
  LineBuffer(level).append_and_print_cr("[%s: %d]", str, value);
}

double G1CollectorPolicy::avg_value (double* data) {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    double ret = 0.0;
    for (uint i = 0; i < ParallelGCThreads; ++i)
      ret += data[i];
    return ret / (double) ParallelGCThreads;
  } else {
    return data[0];
  }
}

double G1CollectorPolicy::max_value (double* data) {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    double ret = data[0];
    for (uint i = 1; i < ParallelGCThreads; ++i)
      if (data[i] > ret)
        ret = data[i];
    return ret;
  } else {
    return data[0];
  }
}

double G1CollectorPolicy::sum_of_values (double* data) {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    double sum = 0.0;
    for (uint i = 0; i < ParallelGCThreads; i++)
      sum += data[i];
    return sum;
  } else {
    return data[0];
  }
}

double G1CollectorPolicy::max_sum (double* data1,
                                   double* data2) {
  double ret = data1[0] + data2[0];

  if (G1CollectedHeap::use_parallel_gc_threads()) {
    for (uint i = 1; i < ParallelGCThreads; ++i) {
      double data = data1[i] + data2[i];
      if (data > ret)
        ret = data;
    }
  }
  return ret;
}

// Anything below that is considered to be zero
#define MIN_TIMER_GRANULARITY 0.0000001

void G1CollectorPolicy::record_collection_pause_end() {
  double end_time_sec = os::elapsedTime();
  double elapsed_ms = _last_pause_time_ms;
  bool parallel = G1CollectedHeap::use_parallel_gc_threads();
  size_t rs_size =
    _cur_collection_pause_used_regions_at_start - collection_set_size();
  size_t cur_used_bytes = _g1->used();
  assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
  bool last_pause_included_initial_mark = false;
  bool update_stats = !_g1->evacuation_failed();

#ifndef PRODUCT
  if (G1YoungSurvRateVerbose) {
    gclog_or_tty->print_cr("");
    _short_lived_surv_rate_group->print();
    // do that for any other surv rate groups too
  }
#endif // PRODUCT

  last_pause_included_initial_mark = during_initial_mark_pause();
  if (last_pause_included_initial_mark)
    record_concurrent_mark_init_end_pre(0.0);

  size_t min_used_targ =
    (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;

  if (!_g1->mark_in_progress() && !_last_full_young_gc) {
    assert(!last_pause_included_initial_mark, "invariant");
    if (cur_used_bytes > min_used_targ &&
      cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
        assert(!during_initial_mark_pause(), "we should not see this here");

        // Note: this might have already been set, if during the last
        // pause we decided to start a cycle but at the beginning of
        // this pause we decided to postpone it. That's OK.
        set_initiate_conc_mark_if_possible();
    }
  }

  _prev_collection_pause_used_at_end_bytes = cur_used_bytes;

  _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
                          end_time_sec, false);

  guarantee(_cur_collection_pause_used_regions_at_start >=
            collection_set_size(),
            "Negative RS size?");

  // This assert is exempted when we're doing parallel collection pauses,
  // because the fragmentation caused by the parallel GC allocation buffers
  // can lead to more memory being used during collection than was used
  // before. Best leave this out until the fragmentation problem is fixed.
  // Pauses in which evacuation failed can also lead to negative
  // collections, since no space is reclaimed from a region containing an
  // object whose evacuation failed.
  // Further, we're now always doing parallel collection.  But I'm still
  // leaving this here as a placeholder for a more precise assertion later.
  // (DLD, 10/05.)
  assert((true || parallel) // Always using GC LABs now.
         || _g1->evacuation_failed()
         || _cur_collection_pause_used_at_start_bytes >= cur_used_bytes,
         "Negative collection");

  size_t freed_bytes =
    _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
  size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;

  double survival_fraction =
    (double)surviving_bytes/
    (double)_collection_set_bytes_used_before;

  _n_pauses++;

  double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
  double mark_stack_scan_time = avg_value(_par_last_mark_stack_scan_times_ms);
  double update_rs_time = avg_value(_par_last_update_rs_times_ms);
  double update_rs_processed_buffers =
    sum_of_values(_par_last_update_rs_processed_buffers);
  double scan_rs_time = avg_value(_par_last_scan_rs_times_ms);
  double obj_copy_time = avg_value(_par_last_obj_copy_times_ms);
  double termination_time = avg_value(_par_last_termination_times_ms);

  double parallel_known_time = update_rs_time +
                               ext_root_scan_time +
                               mark_stack_scan_time +
                               scan_rs_time +
                               obj_copy_time +
                               termination_time;

  double parallel_other_time = _cur_collection_par_time_ms - parallel_known_time;

  PauseSummary* summary = _summary;

  if (update_stats) {
    _recent_rs_scan_times_ms->add(scan_rs_time);
    _recent_pause_times_ms->add(elapsed_ms);
    _recent_rs_sizes->add(rs_size);

    MainBodySummary* body_summary = summary->main_body_summary();
    guarantee(body_summary != NULL, "should not be null!");

    if (_satb_drain_time_set)
      body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms);
    else
      body_summary->record_satb_drain_time_ms(0.0);

    body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
    body_summary->record_mark_stack_scan_time_ms(mark_stack_scan_time);
    body_summary->record_update_rs_time_ms(update_rs_time);
    body_summary->record_scan_rs_time_ms(scan_rs_time);
    body_summary->record_obj_copy_time_ms(obj_copy_time);
    if (parallel) {
      body_summary->record_parallel_time_ms(_cur_collection_par_time_ms);
      body_summary->record_clear_ct_time_ms(_cur_clear_ct_time_ms);
      body_summary->record_termination_time_ms(termination_time);
      body_summary->record_parallel_other_time_ms(parallel_other_time);
    }
    body_summary->record_mark_closure_time_ms(_mark_closure_time_ms);

    // We exempt parallel collection from this check because Alloc Buffer
    // fragmentation can produce negative collections.  Same with evac
    // failure.
    // Further, we're now always doing parallel collection.  But I'm still
    // leaving this here as a placeholder for a more precise assertion later.
    // (DLD, 10/05.)
    assert((true || parallel)
           || _g1->evacuation_failed()
           || surviving_bytes <= _collection_set_bytes_used_before,
           "Or else negative collection!");
    _recent_CS_bytes_used_before->add(_collection_set_bytes_used_before);
    _recent_CS_bytes_surviving->add(surviving_bytes);

    // this is where we update the allocation rate of the application
    double app_time_ms =
      (_cur_collection_start_sec * 1000.0 - _prev_collection_pause_end_ms);
    if (app_time_ms < MIN_TIMER_GRANULARITY) {
      // This usually happens due to the timer not having the required
      // granularity. Some Linuxes are the usual culprits.
      // We'll just set it to something (arbitrarily) small.
      app_time_ms = 1.0;
    }
    size_t regions_allocated =
      (_region_num_young - _prev_region_num_young) +
      (_region_num_tenured - _prev_region_num_tenured);
    double alloc_rate_ms = (double) regions_allocated / app_time_ms;
    _alloc_rate_ms_seq->add(alloc_rate_ms);
    _prev_region_num_young   = _region_num_young;
    _prev_region_num_tenured = _region_num_tenured;

    double interval_ms =
      (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
    update_recent_gc_times(end_time_sec, elapsed_ms);
    _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
    if (recent_avg_pause_time_ratio() < 0.0 ||
        (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
#ifndef PRODUCT
      // Dump info to allow post-facto debugging
      gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
      gclog_or_tty->print_cr("-------------------------------------------");
      gclog_or_tty->print_cr("Recent GC Times (ms):");
      _recent_gc_times_ms->dump();
      gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
      _recent_prev_end_times_for_all_gcs_sec->dump();
      gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
                             _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
      // In debug mode, terminate the JVM if the user wants to debug at this point.
      assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
#endif  // !PRODUCT
      // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
      // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
      if (_recent_avg_pause_time_ratio < 0.0) {
        _recent_avg_pause_time_ratio = 0.0;
      } else {
        assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
        _recent_avg_pause_time_ratio = 1.0;
      }
    }
  }

  if (G1PolicyVerbose > 1) {
    gclog_or_tty->print_cr("   Recording collection pause(%d)", _n_pauses);
  }

  if (G1PolicyVerbose > 1) {
    gclog_or_tty->print_cr("      ET: %10.6f ms           (avg: %10.6f ms)\n"
                           "       ET-RS:  %10.6f ms      (avg: %10.6f ms)\n"
                           "      |RS|: " SIZE_FORMAT,
                           elapsed_ms, recent_avg_time_for_pauses_ms(),
                           scan_rs_time, recent_avg_time_for_rs_scan_ms(),
                           rs_size);

    gclog_or_tty->print_cr("       Used at start: " SIZE_FORMAT"K"
                           "       At end " SIZE_FORMAT "K\n"
                           "       garbage      : " SIZE_FORMAT "K"
                           "       of     " SIZE_FORMAT "K\n"
                           "       survival     : %6.2f%%  (%6.2f%% avg)",
                           _cur_collection_pause_used_at_start_bytes/K,
                           _g1->used()/K, freed_bytes/K,
                           _collection_set_bytes_used_before/K,
                           survival_fraction*100.0,
                           recent_avg_survival_fraction()*100.0);
    gclog_or_tty->print_cr("       Recent %% gc pause time: %6.2f",
                           recent_avg_pause_time_ratio() * 100.0);
  }

  double other_time_ms = elapsed_ms;

  if (_satb_drain_time_set) {
    other_time_ms -= _cur_satb_drain_time_ms;
  }

  if (parallel) {
    other_time_ms -= _cur_collection_par_time_ms + _cur_clear_ct_time_ms;
  } else {
    other_time_ms -=
      update_rs_time +
      ext_root_scan_time + mark_stack_scan_time +
      scan_rs_time + obj_copy_time;
  }

  if (PrintGCDetails) {
    gclog_or_tty->print_cr("%s, %1.8lf secs]",
                           (last_pause_included_initial_mark) ? " (initial-mark)" : "",
                           elapsed_ms / 1000.0);

    if (_satb_drain_time_set) {
      print_stats(1, "SATB Drain Time", _cur_satb_drain_time_ms);
    }
    if (_last_satb_drain_processed_buffers >= 0) {
      print_stats(2, "Processed Buffers", _last_satb_drain_processed_buffers);
    }
    if (parallel) {
      print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
      print_par_stats(2, "GC Worker Start Time", _par_last_gc_worker_start_times_ms);
      print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
      print_par_sizes(3, "Processed Buffers", _par_last_update_rs_processed_buffers);
      print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
      print_par_stats(2, "Mark Stack Scanning", _par_last_mark_stack_scan_times_ms);
      print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
      print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
      print_par_stats(2, "Termination", _par_last_termination_times_ms);
      print_par_sizes(3, "Termination Attempts", _par_last_termination_attempts);
      print_par_stats(2, "GC Worker End Time", _par_last_gc_worker_end_times_ms);

      for (int i = 0; i < _parallel_gc_threads; i++) {
        _par_last_gc_worker_times_ms[i] = _par_last_gc_worker_end_times_ms[i] - _par_last_gc_worker_start_times_ms[i];
      }
      print_par_stats(2, "GC Worker Times", _par_last_gc_worker_times_ms);

      print_stats(2, "Parallel Other", parallel_other_time);
      print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
    } else {
      print_stats(1, "Update RS", update_rs_time);
      print_stats(2, "Processed Buffers",
                  (int)update_rs_processed_buffers);
      print_stats(1, "Ext Root Scanning", ext_root_scan_time);
      print_stats(1, "Mark Stack Scanning", mark_stack_scan_time);
      print_stats(1, "Scan RS", scan_rs_time);
      print_stats(1, "Object Copying", obj_copy_time);
    }
#ifndef PRODUCT
    print_stats(1, "Cur Clear CC", _cur_clear_cc_time_ms);
    print_stats(1, "Cum Clear CC", _cum_clear_cc_time_ms);
    print_stats(1, "Min Clear CC", _min_clear_cc_time_ms);
    print_stats(1, "Max Clear CC", _max_clear_cc_time_ms);
    if (_num_cc_clears > 0) {
      print_stats(1, "Avg Clear CC", _cum_clear_cc_time_ms / ((double)_num_cc_clears));
    }
#endif
    print_stats(1, "Other", other_time_ms);
    print_stats(2, "Choose CSet", _recorded_young_cset_choice_time_ms);

    for (int i = 0; i < _aux_num; ++i) {
      if (_cur_aux_times_set[i]) {
        char buffer[96];
        sprintf(buffer, "Aux%d", i);
        print_stats(1, buffer, _cur_aux_times_ms[i]);
      }
    }
  }

  _all_pause_times_ms->add(elapsed_ms);
  if (update_stats) {
    summary->record_total_time_ms(elapsed_ms);
    summary->record_other_time_ms(other_time_ms);
  }
  for (int i = 0; i < _aux_num; ++i)
    if (_cur_aux_times_set[i])
      _all_aux_times_ms[i].add(_cur_aux_times_ms[i]);

  // Reset marks-between-pauses counter.
  _n_marks_since_last_pause = 0;

  // Update the efficiency-since-mark vars.
  double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
  if (elapsed_ms < MIN_TIMER_GRANULARITY) {
    // This usually happens due to the timer not having the required
1430     // granularity. Some Linuxes are the usual culprits.
1431     // We'll just set it to something (arbitrarily) small.
1432     proc_ms = 1.0;
1433   }
1434   double cur_efficiency = (double) freed_bytes / proc_ms;
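  // Illustrative (hypothetical numbers): freeing 8 MB during a 10 ms pause
  // with 4 GC threads gives proc_ms = 40.0, so cur_efficiency is roughly
  // 200 KB reclaimed per processor-millisecond.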
1435 
1436   bool new_in_marking_window = _in_marking_window;
1437   bool new_in_marking_window_im = false;
1438   if (during_initial_mark_pause()) {
1439     new_in_marking_window = true;
1440     new_in_marking_window_im = true;
1441   }
1442 
1443   if (_last_full_young_gc) {
1444     set_full_young_gcs(false);
1445     _last_full_young_gc = false;
1446   }
1447 
  if (!_last_young_gc_full) {
    if (_should_revert_to_full_young_gcs ||
        _known_garbage_ratio < 0.05 ||
        (adaptive_young_list_length() &&
         (get_gc_eff_factor() * cur_efficiency < predict_young_gc_eff()))) {
      set_full_young_gcs(true);
    }
  }
1456   _should_revert_to_full_young_gcs = false;
1457 
1458   if (_last_young_gc_full && !_during_marking) {
1459     _young_gc_eff_seq->add(cur_efficiency);
1460   }
1461 
1462   _short_lived_surv_rate_group->start_adding_regions();
  // do that for any other surv rate groups
1464 
1465   // <NEW PREDICTION>
1466 
1467   if (update_stats) {
1468     double pause_time_ms = elapsed_ms;
1469 
1470     size_t diff = 0;
1471     if (_max_pending_cards >= _pending_cards)
1472       diff = _max_pending_cards - _pending_cards;
1473     _pending_card_diff_seq->add((double) diff);
1474 
1475     double cost_per_card_ms = 0.0;
1476     if (_pending_cards > 0) {
1477       cost_per_card_ms = update_rs_time / (double) _pending_cards;
1478       _cost_per_card_ms_seq->add(cost_per_card_ms);
1479     }
1480 
1481     size_t cards_scanned = _g1->cards_scanned();
1482 
1483     double cost_per_entry_ms = 0.0;
1484     if (cards_scanned > 10) {
1485       cost_per_entry_ms = scan_rs_time / (double) cards_scanned;
1486       if (_last_young_gc_full)
1487         _cost_per_entry_ms_seq->add(cost_per_entry_ms);
1488       else
1489         _partially_young_cost_per_entry_ms_seq->add(cost_per_entry_ms);
1490     }
1491 
1492     if (_max_rs_lengths > 0) {
1493       double cards_per_entry_ratio =
1494         (double) cards_scanned / (double) _max_rs_lengths;
1495       if (_last_young_gc_full)
1496         _fully_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
1497       else
1498         _partially_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
1499     }
1500 
    if (_max_rs_lengths >= _recorded_rs_lengths) {
      // Both operands are unsigned, so guard the subtraction against
      // wrap-around instead of testing the (always true) diff >= 0.
      size_t rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
      _rs_length_diff_seq->add((double) rs_length_diff);
    }
1504 
1505     size_t copied_bytes = surviving_bytes;
1506     double cost_per_byte_ms = 0.0;
1507     if (copied_bytes > 0) {
1508       cost_per_byte_ms = obj_copy_time / (double) copied_bytes;
1509       if (_in_marking_window)
1510         _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
1511       else
1512         _cost_per_byte_ms_seq->add(cost_per_byte_ms);
1513     }
1514 
1515     double all_other_time_ms = pause_time_ms -
1516       (update_rs_time + scan_rs_time + obj_copy_time +
1517        _mark_closure_time_ms + termination_time);
1518 
1519     double young_other_time_ms = 0.0;
1520     if (_recorded_young_regions > 0) {
1521       young_other_time_ms =
1522         _recorded_young_cset_choice_time_ms +
1523         _recorded_young_free_cset_time_ms;
1524       _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
1525                                              (double) _recorded_young_regions);
1526     }
1527     double non_young_other_time_ms = 0.0;
1528     if (_recorded_non_young_regions > 0) {
1529       non_young_other_time_ms =
1530         _recorded_non_young_cset_choice_time_ms +
1531         _recorded_non_young_free_cset_time_ms;
1532 
1533       _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
1534                                          (double) _recorded_non_young_regions);
1535     }
1536 
1537     double constant_other_time_ms = all_other_time_ms -
1538       (young_other_time_ms + non_young_other_time_ms);
1539     _constant_other_time_ms_seq->add(constant_other_time_ms);
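    // Putting the pieces together, the recorded model decomposes a pause as
    //   pause ~= update_rs + scan_rs + obj_copy + mark_closure + termination
    //            + young_other + non_young_other + constant_other
    // which is exactly how all_other_time_ms was split up above.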
1540 
1541     double survival_ratio = 0.0;
1542     if (_bytes_in_collection_set_before_gc > 0) {
1543       survival_ratio = (double) _bytes_copied_during_gc /
1544                                    (double) _bytes_in_collection_set_before_gc;
1545     }
1546 
1547     _pending_cards_seq->add((double) _pending_cards);
1548     _scanned_cards_seq->add((double) cards_scanned);
1549     _rs_lengths_seq->add((double) _max_rs_lengths);
1550 
1551     double expensive_region_limit_ms =
1552       (double) MaxGCPauseMillis - predict_constant_other_time_ms();
1553     if (expensive_region_limit_ms < 0.0) {
      // this means that the other time was predicted to be longer
      // than the max pause time
1556       expensive_region_limit_ms = (double) MaxGCPauseMillis;
1557     }
1558     _expensive_region_limit_ms = expensive_region_limit_ms;
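    // Illustrative (hypothetical numbers): with MaxGCPauseMillis = 200 and
    // a predicted constant other time of 5 ms, any single region predicted
    // to cost more than 195 ms is considered too expensive (see
    // check_if_region_is_too_expensive() below).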
1559 
1560     if (PREDICTIONS_VERBOSE) {
1561       gclog_or_tty->print_cr("");
1562       gclog_or_tty->print_cr("PREDICTIONS %1.4lf %d "
1563                     "REGIONS %d %d %d "
1564                     "PENDING_CARDS %d %d "
1565                     "CARDS_SCANNED %d %d "
1566                     "RS_LENGTHS %d %d "
1567                     "RS_UPDATE %1.6lf %1.6lf RS_SCAN %1.6lf %1.6lf "
1568                     "SURVIVAL_RATIO %1.6lf %1.6lf "
1569                     "OBJECT_COPY %1.6lf %1.6lf OTHER_CONSTANT %1.6lf %1.6lf "
1570                     "OTHER_YOUNG %1.6lf %1.6lf "
1571                     "OTHER_NON_YOUNG %1.6lf %1.6lf "
1572                     "VTIME_DIFF %1.6lf TERMINATION %1.6lf "
1573                     "ELAPSED %1.6lf %1.6lf ",
1574                     _cur_collection_start_sec,
1575                     (!_last_young_gc_full) ? 2 :
1576                     (last_pause_included_initial_mark) ? 1 : 0,
1577                     _recorded_region_num,
1578                     _recorded_young_regions,
1579                     _recorded_non_young_regions,
1580                     _predicted_pending_cards, _pending_cards,
1581                     _predicted_cards_scanned, cards_scanned,
1582                     _predicted_rs_lengths, _max_rs_lengths,
1583                     _predicted_rs_update_time_ms, update_rs_time,
1584                     _predicted_rs_scan_time_ms, scan_rs_time,
1585                     _predicted_survival_ratio, survival_ratio,
1586                     _predicted_object_copy_time_ms, obj_copy_time,
1587                     _predicted_constant_other_time_ms, constant_other_time_ms,
1588                     _predicted_young_other_time_ms, young_other_time_ms,
1589                     _predicted_non_young_other_time_ms,
1590                     non_young_other_time_ms,
1591                     _vtime_diff_ms, termination_time,
1592                     _predicted_pause_time_ms, elapsed_ms);
1593     }
1594 
1595     if (G1PolicyVerbose > 0) {
1596       gclog_or_tty->print_cr("Pause Time, predicted: %1.4lfms (predicted %s), actual: %1.4lfms",
1597                     _predicted_pause_time_ms,
1598                     (_within_target) ? "within" : "outside",
1599                     elapsed_ms);
1600     }
1601 
1602   }
1603 
1604   _in_marking_window = new_in_marking_window;
1605   _in_marking_window_im = new_in_marking_window_im;
1606   _free_regions_at_end_of_collection = _g1->free_regions();
1607   calculate_young_list_min_length();
1608   calculate_young_list_target_length();
1609 
1610   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
1611   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
1612   adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
1613   // </NEW PREDICTION>
1614 }
1615 
1616 #define EXT_SIZE_FORMAT "%d%s"
1617 #define EXT_SIZE_PARAMS(bytes)                                  \
1618   byte_size_in_proper_unit((bytes)),                            \
1619   proper_unit_for_byte_size((bytes))
1620 
1621 void G1CollectorPolicy::print_heap_transition() {
1622   if (PrintGCDetails) {
1623     YoungList* young_list = _g1->young_list();
1624     size_t eden_bytes = young_list->eden_used_bytes();
1625     size_t survivor_bytes = young_list->survivor_used_bytes();
1626     size_t used_before_gc = _cur_collection_pause_used_at_start_bytes;
1627     size_t used = _g1->used();
1628     size_t capacity = _g1->capacity();
1629 
1630     gclog_or_tty->print_cr(
1631          "   [Eden: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
1632              "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
1633              "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
1634                      EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
1635              EXT_SIZE_PARAMS(_eden_bytes_before_gc),
1636                EXT_SIZE_PARAMS(eden_bytes),
1637              EXT_SIZE_PARAMS(_survivor_bytes_before_gc),
1638                EXT_SIZE_PARAMS(survivor_bytes),
1639              EXT_SIZE_PARAMS(used_before_gc),
1640              EXT_SIZE_PARAMS(_capacity_before_gc),
1641                EXT_SIZE_PARAMS(used),
1642                EXT_SIZE_PARAMS(capacity));
1643   } else if (PrintGC) {
1644     _g1->print_size_transition(gclog_or_tty,
1645                                _cur_collection_pause_used_at_start_bytes,
1646                                _g1->used(), _g1->capacity());
1647   }
1648 }
1649 
1650 // <NEW PREDICTION>
1651 
1652 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
1653                                                      double update_rs_processed_buffers,
1654                                                      double goal_ms) {
1655   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  ConcurrentG1Refine* cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
1657 
1658   if (G1UseAdaptiveConcRefinement) {
1659     const int k_gy = 3, k_gr = 6;
1660     const double inc_k = 1.1, dec_k = 0.9;
1661 
1662     int g = cg1r->green_zone();
1663     if (update_rs_time > goal_ms) {
      g = (int)(g * dec_k);  // Can become 0; that's OK and means mutator-only processing.
1665     } else {
1666       if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
1667         g = (int)MAX2(g * inc_k, g + 1.0);
1668       }
1669     }
1670     // Change the refinement threads params
1671     cg1r->set_green_zone(g);
1672     cg1r->set_yellow_zone(g * k_gy);
1673     cg1r->set_red_zone(g * k_gr);
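    // Illustrative: a green zone of 8 buffers yields yellow = 24 and
    // red = 48 with the multipliers above; the zones stage how eagerly
    // refinement threads are activated as completed buffers accumulate.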
1674     cg1r->reinitialize_threads();
1675 
1676     int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
1677     int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
1678                                     cg1r->yellow_zone());
1679     // Change the barrier params
1680     dcqs.set_process_completed_threshold(processing_threshold);
1681     dcqs.set_max_completed_queue(cg1r->red_zone());
1682   }
1683 
1684   int curr_queue_size = dcqs.completed_buffers_num();
1685   if (curr_queue_size >= cg1r->yellow_zone()) {
1686     dcqs.set_completed_queue_padding(curr_queue_size);
1687   } else {
1688     dcqs.set_completed_queue_padding(0);
1689   }
1690   dcqs.notify_if_necessary();
1691 }
1692 
1693 double
1694 G1CollectorPolicy::
1695 predict_young_collection_elapsed_time_ms(size_t adjustment) {
1696   guarantee( adjustment == 0 || adjustment == 1, "invariant" );
1697 
1698   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1699   size_t young_num = g1h->young_list()->length();
1700   if (young_num == 0)
1701     return 0.0;
1702 
1703   young_num += adjustment;
1704   size_t pending_cards = predict_pending_cards();
1705   size_t rs_lengths = g1h->young_list()->sampled_rs_lengths() +
1706                       predict_rs_length_diff();
1707   size_t card_num;
1708   if (full_young_gcs())
1709     card_num = predict_young_card_num(rs_lengths);
1710   else
1711     card_num = predict_non_young_card_num(rs_lengths);
1712   size_t young_byte_size = young_num * HeapRegion::GrainBytes;
1713   double accum_yg_surv_rate =
1714     _short_lived_surv_rate_group->accum_surv_rate(adjustment);
1715 
1716   size_t bytes_to_copy =
1717     (size_t) (accum_yg_surv_rate * (double) HeapRegion::GrainBytes);
1718 
1719   return
1720     predict_rs_update_time_ms(pending_cards) +
1721     predict_rs_scan_time_ms(card_num) +
1722     predict_object_copy_time_ms(bytes_to_copy) +
1723     predict_young_other_time_ms(young_num) +
1724     predict_constant_other_time_ms();
1725 }
1726 
1727 double
1728 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
1729   size_t rs_length = predict_rs_length_diff();
1730   size_t card_num;
1731   if (full_young_gcs())
1732     card_num = predict_young_card_num(rs_length);
1733   else
1734     card_num = predict_non_young_card_num(rs_length);
1735   return predict_base_elapsed_time_ms(pending_cards, card_num);
1736 }
1737 
1738 double
1739 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
1740                                                 size_t scanned_cards) {
1741   return
1742     predict_rs_update_time_ms(pending_cards) +
1743     predict_rs_scan_time_ms(scanned_cards) +
1744     predict_constant_other_time_ms();
1745 }
1746 
1747 double
1748 G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
1749                                                   bool young) {
1750   size_t rs_length = hr->rem_set()->occupied();
1751   size_t card_num;
1752   if (full_young_gcs())
1753     card_num = predict_young_card_num(rs_length);
1754   else
1755     card_num = predict_non_young_card_num(rs_length);
1756   size_t bytes_to_copy = predict_bytes_to_copy(hr);
1757 
1758   double region_elapsed_time_ms =
1759     predict_rs_scan_time_ms(card_num) +
1760     predict_object_copy_time_ms(bytes_to_copy);
1761 
1762   if (young)
1763     region_elapsed_time_ms += predict_young_other_time_ms(1);
1764   else
1765     region_elapsed_time_ms += predict_non_young_other_time_ms(1);
1766 
1767   return region_elapsed_time_ms;
1768 }
1769 
1770 size_t
1771 G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
1772   size_t bytes_to_copy;
1773   if (hr->is_marked())
1774     bytes_to_copy = hr->max_live_bytes();
1775   else {
1776     guarantee( hr->is_young() && hr->age_in_surv_rate_group() != -1,
1777                "invariant" );
1778     int age = hr->age_in_surv_rate_group();
1779     double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
1780     bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
1781   }
1782 
1783   return bytes_to_copy;
1784 }
1785 
1786 void
1787 G1CollectorPolicy::start_recording_regions() {
1788   _recorded_rs_lengths            = 0;
1789   _recorded_young_regions         = 0;
1790   _recorded_non_young_regions     = 0;
1791 
1792 #if PREDICTIONS_VERBOSE
1793   _recorded_marked_bytes          = 0;
1794   _recorded_young_bytes           = 0;
1795   _predicted_bytes_to_copy        = 0;
1796   _predicted_rs_lengths           = 0;
1797   _predicted_cards_scanned        = 0;
1798 #endif // PREDICTIONS_VERBOSE
1799 }
1800 
1801 void
1802 G1CollectorPolicy::record_cset_region_info(HeapRegion* hr, bool young) {
1803 #if PREDICTIONS_VERBOSE
1804   if (!young) {
1805     _recorded_marked_bytes += hr->max_live_bytes();
1806   }
1807   _predicted_bytes_to_copy += predict_bytes_to_copy(hr);
1808 #endif // PREDICTIONS_VERBOSE
1809 
1810   size_t rs_length = hr->rem_set()->occupied();
1811   _recorded_rs_lengths += rs_length;
1812 }
1813 
1814 void
1815 G1CollectorPolicy::record_non_young_cset_region(HeapRegion* hr) {
1816   assert(!hr->is_young(), "should not call this");
1817   ++_recorded_non_young_regions;
1818   record_cset_region_info(hr, false);
1819 }
1820 
1821 void
1822 G1CollectorPolicy::set_recorded_young_regions(size_t n_regions) {
1823   _recorded_young_regions = n_regions;
1824 }
1825 
1826 void G1CollectorPolicy::set_recorded_young_bytes(size_t bytes) {
1827 #if PREDICTIONS_VERBOSE
1828   _recorded_young_bytes = bytes;
1829 #endif // PREDICTIONS_VERBOSE
1830 }
1831 
1832 void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
1833   _recorded_rs_lengths = rs_lengths;
1834 }
1835 
1836 void G1CollectorPolicy::set_predicted_bytes_to_copy(size_t bytes) {
1837   _predicted_bytes_to_copy = bytes;
1838 }
1839 
1840 void
1841 G1CollectorPolicy::end_recording_regions() {
1842   // The _predicted_pause_time_ms field is referenced in code
1843   // not under PREDICTIONS_VERBOSE. Let's initialize it.
1844   _predicted_pause_time_ms = -1.0;
1845 
1846 #if PREDICTIONS_VERBOSE
1847   _predicted_pending_cards = predict_pending_cards();
1848   _predicted_rs_lengths = _recorded_rs_lengths + predict_rs_length_diff();
1849   if (full_young_gcs())
1850     _predicted_cards_scanned += predict_young_card_num(_predicted_rs_lengths);
1851   else
1852     _predicted_cards_scanned +=
1853       predict_non_young_card_num(_predicted_rs_lengths);
1854   _recorded_region_num = _recorded_young_regions + _recorded_non_young_regions;
1855 
1856   _predicted_rs_update_time_ms =
1857     predict_rs_update_time_ms(_g1->pending_card_num());
1858   _predicted_rs_scan_time_ms =
1859     predict_rs_scan_time_ms(_predicted_cards_scanned);
1860   _predicted_object_copy_time_ms =
1861     predict_object_copy_time_ms(_predicted_bytes_to_copy);
1862   _predicted_constant_other_time_ms =
1863     predict_constant_other_time_ms();
1864   _predicted_young_other_time_ms =
1865     predict_young_other_time_ms(_recorded_young_regions);
1866   _predicted_non_young_other_time_ms =
1867     predict_non_young_other_time_ms(_recorded_non_young_regions);
1868 
1869   _predicted_pause_time_ms =
1870     _predicted_rs_update_time_ms +
1871     _predicted_rs_scan_time_ms +
1872     _predicted_object_copy_time_ms +
1873     _predicted_constant_other_time_ms +
1874     _predicted_young_other_time_ms +
1875     _predicted_non_young_other_time_ms;
1876 #endif // PREDICTIONS_VERBOSE
1877 }
1878 
1879 void G1CollectorPolicy::check_if_region_is_too_expensive(double
1880                                                            predicted_time_ms) {
1881   // I don't think we need to do this when in young GC mode since
1882   // marking will be initiated next time we hit the soft limit anyway...
1883   if (predicted_time_ms > _expensive_region_limit_ms) {
1884     // no point in doing another partial one
1885     _should_revert_to_full_young_gcs = true;
1886   }
1887 }
1888 
1889 // </NEW PREDICTION>
1890 
1891 
1892 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
1893                                                double elapsed_ms) {
1894   _recent_gc_times_ms->add(elapsed_ms);
1895   _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
1896   _prev_collection_pause_end_ms = end_time_sec * 1000.0;
1897 }
1898 
1899 double G1CollectorPolicy::recent_avg_time_for_pauses_ms() {
1900   if (_recent_pause_times_ms->num() == 0) {
1901     return (double) MaxGCPauseMillis;
1902   }
1903   return _recent_pause_times_ms->avg();
1904 }
1905 
1906 double G1CollectorPolicy::recent_avg_time_for_rs_scan_ms() {
1907   if (_recent_rs_scan_times_ms->num() == 0) {
    return (double) MaxGCPauseMillis / 3.0;
1909   }
1910   return _recent_rs_scan_times_ms->avg();
1911 }
1912 
1913 int G1CollectorPolicy::number_of_recent_gcs() {
1914   assert(_recent_rs_scan_times_ms->num() ==
1915          _recent_pause_times_ms->num(), "Sequence out of sync");
1916   assert(_recent_pause_times_ms->num() ==
1917          _recent_CS_bytes_used_before->num(), "Sequence out of sync");
1918   assert(_recent_CS_bytes_used_before->num() ==
1919          _recent_CS_bytes_surviving->num(), "Sequence out of sync");
1920 
1921   return _recent_pause_times_ms->num();
1922 }
1923 
1924 double G1CollectorPolicy::recent_avg_survival_fraction() {
1925   return recent_avg_survival_fraction_work(_recent_CS_bytes_surviving,
1926                                            _recent_CS_bytes_used_before);
1927 }
1928 
1929 double G1CollectorPolicy::last_survival_fraction() {
1930   return last_survival_fraction_work(_recent_CS_bytes_surviving,
1931                                      _recent_CS_bytes_used_before);
1932 }
1933 
1934 double
1935 G1CollectorPolicy::recent_avg_survival_fraction_work(TruncatedSeq* surviving,
1936                                                      TruncatedSeq* before) {
1937   assert(surviving->num() == before->num(), "Sequence out of sync");
  if (before->sum() > 0.0) {
    double recent_survival_rate = surviving->sum() / before->sum();
    // We exempt parallel collection from this check because Alloc Buffer
    // fragmentation can produce negative collections.
    // Further, we're now always doing parallel collection.  But I'm still
    // leaving this here as a placeholder for a more precise assertion later.
    // (DLD, 10/05.)
    assert((true || G1CollectedHeap::use_parallel_gc_threads()) ||
           _g1->evacuation_failed() ||
           recent_survival_rate <= 1.0, "Or bad frac");
    return recent_survival_rate;
1949   } else {
1950     return 1.0; // Be conservative.
1951   }
1952 }
1953 
1954 double
1955 G1CollectorPolicy::last_survival_fraction_work(TruncatedSeq* surviving,
1956                                                TruncatedSeq* before) {
1957   assert(surviving->num() == before->num(), "Sequence out of sync");
1958   if (surviving->num() > 0 && before->last() > 0.0) {
1959     double last_survival_rate = surviving->last() / before->last();
1960     // We exempt parallel collection from this check because Alloc Buffer
1961     // fragmentation can produce negative collections.
1962     // Further, we're now always doing parallel collection.  But I'm still
1963     // leaving this here as a placeholder for a more precise assertion later.
1964     // (DLD, 10/05.)
1965     assert((true || G1CollectedHeap::use_parallel_gc_threads()) ||
1966            last_survival_rate <= 1.0, "Or bad frac");
1967     return last_survival_rate;
1968   } else {
1969     return 1.0;
1970   }
1971 }
1972 
1973 static const int survival_min_obs = 5;
1974 static double survival_min_obs_limits[] = { 0.9, 0.7, 0.5, 0.3, 0.1 };
1975 static const double min_survival_rate = 0.1;
1976 
1977 double
1978 G1CollectorPolicy::conservative_avg_survival_fraction_work(double avg,
1979                                                            double latest) {
1980   double res = avg;
1981   if (number_of_recent_gcs() < survival_min_obs) {
1982     res = MAX2(res, survival_min_obs_limits[number_of_recent_gcs()]);
1983   }
1984   res = MAX2(res, latest);
1985   res = MAX2(res, min_survival_rate);
1986   // In the parallel case, LAB fragmentation can produce "negative
1987   // collections"; so can evac failure.  Cap at 1.0
1988   res = MIN2(res, 1.0);
1989   return res;
1990 }
1991 
1992 size_t G1CollectorPolicy::expansion_amount() {
1993   if ((recent_avg_pause_time_ratio() * 100.0) > _gc_overhead_perc) {
1994     // We will double the existing space, or take
1995     // G1ExpandByPercentOfAvailable % of the available expansion
1996     // space, whichever is smaller, bounded below by a minimum
1997     // expansion (unless that's all that's left.)
1998     const size_t min_expand_bytes = 1*M;
1999     size_t reserved_bytes = _g1->max_capacity();
2000     size_t committed_bytes = _g1->capacity();
2001     size_t uncommitted_bytes = reserved_bytes - committed_bytes;
2002     size_t expand_bytes;
2003     size_t expand_bytes_via_pct =
2004       uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
2005     expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
2006     expand_bytes = MAX2(expand_bytes, min_expand_bytes);
2007     expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
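    // Illustrative (hypothetical numbers): with 1 GB committed out of a
    // 4 GB reservation and G1ExpandByPercentOfAvailable = 20, the 3 GB
    // uncommitted yields a percentage-based candidate of ~614 MB, which
    // also passes the committed-size and minimum-expansion bounds.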
2008     if (G1PolicyVerbose > 1) {
2009       gclog_or_tty->print("Decided to expand: ratio = %5.2f, "
2010                  "committed = %d%s, uncommited = %d%s, via pct = %d%s.\n"
2011                  "                   Answer = %d.\n",
2012                  recent_avg_pause_time_ratio(),
2013                  byte_size_in_proper_unit(committed_bytes),
2014                  proper_unit_for_byte_size(committed_bytes),
2015                  byte_size_in_proper_unit(uncommitted_bytes),
2016                  proper_unit_for_byte_size(uncommitted_bytes),
2017                  byte_size_in_proper_unit(expand_bytes_via_pct),
2018                  proper_unit_for_byte_size(expand_bytes_via_pct),
2019                  byte_size_in_proper_unit(expand_bytes),
2020                  proper_unit_for_byte_size(expand_bytes));
2021     }
2022     return expand_bytes;
2023   } else {
2024     return 0;
2025   }
2026 }
2027 
2028 void G1CollectorPolicy::note_start_of_mark_thread() {
2029   _mark_thread_startup_sec = os::elapsedTime();
2030 }
2031 
2032 class CountCSClosure: public HeapRegionClosure {
2033   G1CollectorPolicy* _g1_policy;
2034 public:
2035   CountCSClosure(G1CollectorPolicy* g1_policy) :
2036     _g1_policy(g1_policy) {}
2037   bool doHeapRegion(HeapRegion* r) {
2038     _g1_policy->_bytes_in_collection_set_before_gc += r->used();
2039     return false;
2040   }
2041 };
2042 
2043 void G1CollectorPolicy::count_CS_bytes_used() {
2044   CountCSClosure cs_closure(this);
2045   _g1->collection_set_iterate(&cs_closure);
2046 }
2047 
2048 void G1CollectorPolicy::print_summary (int level,
2049                                        const char* str,
2050                                        NumberSeq* seq) const {
2051   double sum = seq->sum();
2052   LineBuffer(level + 1).append_and_print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)",
2053                 str, sum / 1000.0, seq->avg());
2054 }
2055 
2056 void G1CollectorPolicy::print_summary_sd (int level,
2057                                           const char* str,
2058                                           NumberSeq* seq) const {
2059   print_summary(level, str, seq);
2060   LineBuffer(level + 6).append_and_print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
2061                 seq->num(), seq->sd(), seq->maximum());
2062 }
2063 
2064 void G1CollectorPolicy::check_other_times(int level,
2065                                         NumberSeq* other_times_ms,
2066                                         NumberSeq* calc_other_times_ms) const {
2067   bool should_print = false;
2068   LineBuffer buf(level + 2);
2069 
2070   double max_sum = MAX2(fabs(other_times_ms->sum()),
2071                         fabs(calc_other_times_ms->sum()));
2072   double min_sum = MIN2(fabs(other_times_ms->sum()),
2073                         fabs(calc_other_times_ms->sum()));
2074   double sum_ratio = max_sum / min_sum;
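  // Illustrative: a recorded sum of 10.0 ms against a calculated sum of
  // 12.0 ms gives sum_ratio = 1.2, which exceeds the 10% tolerance below.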
2075   if (sum_ratio > 1.1) {
2076     should_print = true;
2077     buf.append_and_print_cr("## CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###");
2078   }
2079 
2080   double max_avg = MAX2(fabs(other_times_ms->avg()),
2081                         fabs(calc_other_times_ms->avg()));
2082   double min_avg = MIN2(fabs(other_times_ms->avg()),
2083                         fabs(calc_other_times_ms->avg()));
2084   double avg_ratio = max_avg / min_avg;
2085   if (avg_ratio > 1.1) {
2086     should_print = true;
2087     buf.append_and_print_cr("## CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###");
2088   }
2089 
2090   if (other_times_ms->sum() < -0.01) {
2091     buf.append_and_print_cr("## RECORDED OTHER SUM IS NEGATIVE ###");
2092   }
2093 
2094   if (other_times_ms->avg() < -0.01) {
2095     buf.append_and_print_cr("## RECORDED OTHER AVG IS NEGATIVE ###");
2096   }
2097 
2098   if (calc_other_times_ms->sum() < -0.01) {
2099     should_print = true;
2100     buf.append_and_print_cr("## CALCULATED OTHER SUM IS NEGATIVE ###");
2101   }
2102 
2103   if (calc_other_times_ms->avg() < -0.01) {
2104     should_print = true;
2105     buf.append_and_print_cr("## CALCULATED OTHER AVG IS NEGATIVE ###");
2106   }
2107 
2108   if (should_print)
2109     print_summary(level, "Other(Calc)", calc_other_times_ms);
2110 }
2111 
2112 void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
2113   bool parallel = G1CollectedHeap::use_parallel_gc_threads();
2114   MainBodySummary*    body_summary = summary->main_body_summary();
2115   if (summary->get_total_seq()->num() > 0) {
2116     print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq());
2117     if (body_summary != NULL) {
2118       print_summary(1, "SATB Drain", body_summary->get_satb_drain_seq());
2119       if (parallel) {
2120         print_summary(1, "Parallel Time", body_summary->get_parallel_seq());
2121         print_summary(2, "Update RS", body_summary->get_update_rs_seq());
2122         print_summary(2, "Ext Root Scanning",
2123                       body_summary->get_ext_root_scan_seq());
2124         print_summary(2, "Mark Stack Scanning",
2125                       body_summary->get_mark_stack_scan_seq());
2126         print_summary(2, "Scan RS", body_summary->get_scan_rs_seq());
2127         print_summary(2, "Object Copy", body_summary->get_obj_copy_seq());
2128         print_summary(2, "Termination", body_summary->get_termination_seq());
2129         print_summary(2, "Other", body_summary->get_parallel_other_seq());
2130         {
2131           NumberSeq* other_parts[] = {
2132             body_summary->get_update_rs_seq(),
2133             body_summary->get_ext_root_scan_seq(),
2134             body_summary->get_mark_stack_scan_seq(),
2135             body_summary->get_scan_rs_seq(),
2136             body_summary->get_obj_copy_seq(),
2137             body_summary->get_termination_seq()
2138           };
2139           NumberSeq calc_other_times_ms(body_summary->get_parallel_seq(),
2140                                         6, other_parts);
2141           check_other_times(2, body_summary->get_parallel_other_seq(),
2142                             &calc_other_times_ms);
2143         }
2144         print_summary(1, "Mark Closure", body_summary->get_mark_closure_seq());
2145         print_summary(1, "Clear CT", body_summary->get_clear_ct_seq());
2146       } else {
2147         print_summary(1, "Update RS", body_summary->get_update_rs_seq());
2148         print_summary(1, "Ext Root Scanning",
2149                       body_summary->get_ext_root_scan_seq());
2150         print_summary(1, "Mark Stack Scanning",
2151                       body_summary->get_mark_stack_scan_seq());
2152         print_summary(1, "Scan RS", body_summary->get_scan_rs_seq());
2153         print_summary(1, "Object Copy", body_summary->get_obj_copy_seq());
2154       }
2155     }
2156     print_summary(1, "Other", summary->get_other_seq());
2157     {
2158       if (body_summary != NULL) {
2159         NumberSeq calc_other_times_ms;
2160         if (parallel) {
2161           // parallel
2162           NumberSeq* other_parts[] = {
2163             body_summary->get_satb_drain_seq(),
2164             body_summary->get_parallel_seq(),
2165             body_summary->get_clear_ct_seq()
2166           };
2167           calc_other_times_ms = NumberSeq(summary->get_total_seq(),
2168                                                 3, other_parts);
2169         } else {
2170           // serial
2171           NumberSeq* other_parts[] = {
2172             body_summary->get_satb_drain_seq(),
2173             body_summary->get_update_rs_seq(),
2174             body_summary->get_ext_root_scan_seq(),
2175             body_summary->get_mark_stack_scan_seq(),
2176             body_summary->get_scan_rs_seq(),
2177             body_summary->get_obj_copy_seq()
2178           };
2179           calc_other_times_ms = NumberSeq(summary->get_total_seq(),
2180                                                 6, other_parts);
2181         }
2182         check_other_times(1,  summary->get_other_seq(), &calc_other_times_ms);
2183       }
2184     }
2185   } else {
2186     LineBuffer(1).append_and_print_cr("none");
2187   }
2188   LineBuffer(0).append_and_print_cr("");
2189 }
2190 
2191 void G1CollectorPolicy::print_tracing_info() const {
2192   if (TraceGen0Time) {
2193     gclog_or_tty->print_cr("ALL PAUSES");
2194     print_summary_sd(0, "Total", _all_pause_times_ms);
2195     gclog_or_tty->print_cr("");
2196     gclog_or_tty->print_cr("");
2197     gclog_or_tty->print_cr("   Full Young GC Pauses:    %8d", _full_young_pause_num);
2198     gclog_or_tty->print_cr("   Partial Young GC Pauses: %8d", _partial_young_pause_num);
2199     gclog_or_tty->print_cr("");
2200 
2201     gclog_or_tty->print_cr("EVACUATION PAUSES");
2202     print_summary(_summary);
2203 
2204     gclog_or_tty->print_cr("MISC");
2205     print_summary_sd(0, "Stop World", _all_stop_world_times_ms);
2206     print_summary_sd(0, "Yields", _all_yield_times_ms);
2207     for (int i = 0; i < _aux_num; ++i) {
2208       if (_all_aux_times_ms[i].num() > 0) {
2209         char buffer[96];
2210         sprintf(buffer, "Aux%d", i);
2211         print_summary_sd(0, buffer, &_all_aux_times_ms[i]);
2212       }
2213     }
2214 
2215     size_t all_region_num = _region_num_young + _region_num_tenured;
2216     gclog_or_tty->print_cr("   New Regions %8d, Young %8d (%6.2lf%%), "
2217                "Tenured %8d (%6.2lf%%)",
2218                all_region_num,
2219                _region_num_young,
2220                (double) _region_num_young / (double) all_region_num * 100.0,
2221                _region_num_tenured,
2222                (double) _region_num_tenured / (double) all_region_num * 100.0);
2223   }
2224   if (TraceGen1Time) {
2225     if (_all_full_gc_times_ms->num() > 0) {
2226       gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
2227                  _all_full_gc_times_ms->num(),
2228                  _all_full_gc_times_ms->sum() / 1000.0);
2229       gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times_ms->avg());
2230       gclog_or_tty->print_cr("                     [std. dev = %8.2f ms, max = %8.2f ms]",
2231                     _all_full_gc_times_ms->sd(),
2232                     _all_full_gc_times_ms->maximum());
2233     }
2234   }
2235 }
2236 
2237 void G1CollectorPolicy::print_yg_surv_rate_info() const {
2238 #ifndef PRODUCT
2239   _short_lived_surv_rate_group->print_surv_rate_summary();
2240   // add this call for any other surv rate groups
2241 #endif // PRODUCT
2242 }
2243 
2244 void
2245 G1CollectorPolicy::update_region_num(bool young) {
2246   if (young) {
2247     ++_region_num_young;
2248   } else {
2249     ++_region_num_tenured;
2250   }
2251 }
2252 
2253 #ifndef PRODUCT
2254 // for debugging, bit of a hack...
2255 static char*
2256 region_num_to_mbs(int length) {
2257   static char buffer[64];
2258   double bytes = (double) (length * HeapRegion::GrainBytes);
2259   double mbs = bytes / (double) (1024 * 1024);
2260   sprintf(buffer, "%7.2lfMB", mbs);
2261   return buffer;
2262 }
2263 #endif // PRODUCT
2264 
2265 size_t G1CollectorPolicy::max_regions(int purpose) {
2266   switch (purpose) {
2267     case GCAllocForSurvived:
2268       return _max_survivor_regions;
2269     case GCAllocForTenured:
2270       return REGIONS_UNLIMITED;
2271     default:
2272       ShouldNotReachHere();
2273       return REGIONS_UNLIMITED;
2274   };
2275 }
2276 
2277 void G1CollectorPolicy::calculate_max_gc_locker_expansion() {
2278   size_t expansion_region_num = 0;
2279   if (GCLockerEdenExpansionPercent > 0) {
2280     double perc = (double) GCLockerEdenExpansionPercent / 100.0;
2281     double expansion_region_num_d = perc * (double) _young_list_target_length;
2282     // We use ceiling so that if expansion_region_num_d is > 0.0 (but
2283     // less than 1.0) we'll get 1.
2284     expansion_region_num = (size_t) ceil(expansion_region_num_d);
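    // Illustrative: GCLockerEdenExpansionPercent = 5 with a target length
    // of 100 regions yields ceil(5.0) = 5 extra regions, so the young list
    // may grow to 105 regions while the GC locker is held.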
2285   } else {
2286     assert(expansion_region_num == 0, "sanity");
2287   }
2288   _young_list_max_length = _young_list_target_length + expansion_region_num;
2289   assert(_young_list_target_length <= _young_list_max_length, "post-condition");
2290 }
2291 
2292 // Calculates survivor space parameters.
2293 void G1CollectorPolicy::calculate_survivors_policy()
2294 {
2295   if (G1FixedSurvivorSpaceSize == 0) {
2296     _max_survivor_regions = _young_list_target_length / SurvivorRatio;
2297   } else {
2298     _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;
2299   }
2300 
2301   if (G1FixedTenuringThreshold) {
2302     _tenuring_threshold = MaxTenuringThreshold;
2303   } else {
2304     _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
2305         HeapRegion::GrainWords * _max_survivor_regions);
2306   }
2307 }
2308 
2309 #ifndef PRODUCT
2310 class HRSortIndexIsOKClosure: public HeapRegionClosure {
2311   CollectionSetChooser* _chooser;
2312 public:
2313   HRSortIndexIsOKClosure(CollectionSetChooser* chooser) :
2314     _chooser(chooser) {}
2315 
2316   bool doHeapRegion(HeapRegion* r) {
2317     if (!r->continuesHumongous()) {
2318       assert(_chooser->regionProperlyOrdered(r), "Ought to be.");
2319     }
2320     return false;
2321   }
2322 };
2323 
2324 bool G1CollectorPolicy_BestRegionsFirst::assertMarkedBytesDataOK() {
2325   HRSortIndexIsOKClosure cl(_collectionSetChooser);
2326   _g1->heap_region_iterate(&cl);
2327   return true;
2328 }
2329 #endif
2330 
2331 bool
2332 G1CollectorPolicy::force_initial_mark_if_outside_cycle() {
2333   bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
2334   if (!during_cycle) {
2335     set_initiate_conc_mark_if_possible();
2336     return true;
2337   } else {
2338     return false;
2339   }
2340 }
2341 
2342 void
2343 G1CollectorPolicy::decide_on_conc_mark_initiation() {
2344   // We are about to decide on whether this pause will be an
2345   // initial-mark pause.
2346 
2347   // First, during_initial_mark_pause() should not be already set. We
2348   // will set it here if we have to. However, it should be cleared by
2349   // the end of the pause (it's only set for the duration of an
2350   // initial-mark pause).
2351   assert(!during_initial_mark_pause(), "pre-condition");
2352 
2353   if (initiate_conc_mark_if_possible()) {
2354     // We had noticed on a previous pause that the heap occupancy has
2355     // gone over the initiating threshold and we should start a
2356     // concurrent marking cycle. So we might initiate one.
2357 
2358     bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
2359     if (!during_cycle) {
2360       // The concurrent marking thread is not "during a cycle", i.e.,
2361       // it has completed the last one. So we can go ahead and
2362       // initiate a new cycle.
2363 
2364       set_during_initial_mark_pause();
2365 
2366       // And we can now clear initiate_conc_mark_if_possible() as
2367       // we've already acted on it.
2368       clear_initiate_conc_mark_if_possible();
2369     } else {
2370       // The concurrent marking thread is still finishing up the
      // previous cycle. If we start one right now the two cycles will
      // overlap. In particular, the concurrent marking thread might
2373       // be in the process of clearing the next marking bitmap (which
2374       // we will use for the next cycle if we start one). Starting a
2375       // cycle now will be bad given that parts of the marking
2376       // information might get cleared by the marking thread. And we
2377       // cannot wait for the marking thread to finish the cycle as it
2378       // periodically yields while clearing the next marking bitmap
2379       // and, if it's in a yield point, it's waiting for us to
2380       // finish. So, at this point we will not start a cycle and we'll
2381       // let the concurrent marking thread complete the last one.
2382     }
2383   }
2384 }
2385 
2386 void
2387 G1CollectorPolicy_BestRegionsFirst::
2388 record_collection_pause_start(double start_time_sec, size_t start_used) {
2389   G1CollectorPolicy::record_collection_pause_start(start_time_sec, start_used);
2390 }
2391 
2392 class KnownGarbageClosure: public HeapRegionClosure {
2393   CollectionSetChooser* _hrSorted;
2394 
2395 public:
2396   KnownGarbageClosure(CollectionSetChooser* hrSorted) :
2397     _hrSorted(hrSorted)
2398   {}
2399 
2400   bool doHeapRegion(HeapRegion* r) {
    // Humongous regions are never added to the collection set here;
    // as noted below, they are reclaimed directly at the end of a
    // marking cycle.
2404 
2405     // Do we have any marking information for this region?
2406     if (r->is_marked()) {
2407       // We don't include humongous regions in collection
2408       // sets because we collect them immediately at the end of a marking
2409       // cycle.  We also don't include young regions because we *must*
2410       // include them in the next collection pause.
2411       if (!r->isHumongous() && !r->is_young()) {
2412         _hrSorted->addMarkedHeapRegion(r);
2413       }
2414     }
2415     return false;
2416   }
2417 };
2418 
2419 class ParKnownGarbageHRClosure: public HeapRegionClosure {
2420   CollectionSetChooser* _hrSorted;
2421   jint _marked_regions_added;
2422   jint _chunk_size;
2423   jint _cur_chunk_idx;
2424   jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
2425   int _worker;
2426   int _invokes;
2427 
2428   void get_new_chunk() {
2429     _cur_chunk_idx = _hrSorted->getParMarkedHeapRegionChunk(_chunk_size);
2430     _cur_chunk_end = _cur_chunk_idx + _chunk_size;
2431   }
2432   void add_region(HeapRegion* r) {
2433     if (_cur_chunk_idx == _cur_chunk_end) {
2434       get_new_chunk();
2435     }
2436     assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
2437     _hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r);
2438     _marked_regions_added++;
2439     _cur_chunk_idx++;
2440   }
2441 
2442 public:
2443   ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
2444                            jint chunk_size,
2445                            int worker) :
2446     _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
2447     _marked_regions_added(0), _cur_chunk_idx(0), _cur_chunk_end(0),
2448     _invokes(0)
2449   {}
2450 
2451   bool doHeapRegion(HeapRegion* r) {
    // Humongous regions are never added to the collection set here;
    // as noted below, they are reclaimed directly at the end of a
    // marking cycle.
2455     _invokes++;
2456 
2457     // Do we have any marking information for this region?
2458     if (r->is_marked()) {
2459       // We don't include humongous regions in collection
2460       // sets because we collect them immediately at the end of a marking
2461       // cycle.
2462       // We also do not include young regions in collection sets
2463       if (!r->isHumongous() && !r->is_young()) {
2464         add_region(r);
2465       }
2466     }
2467     return false;
2468   }
2469   jint marked_regions_added() { return _marked_regions_added; }
2470   int invokes() { return _invokes; }
2471 };
2472 
2473 class ParKnownGarbageTask: public AbstractGangTask {
2474   CollectionSetChooser* _hrSorted;
2475   jint _chunk_size;
2476   G1CollectedHeap* _g1;
2477 public:
2478   ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) :
2479     AbstractGangTask("ParKnownGarbageTask"),
2480     _hrSorted(hrSorted), _chunk_size(chunk_size),
2481     _g1(G1CollectedHeap::heap())
2482   {}
2483 
2484   void work(int i) {
2485     ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size, i);
2486     // Back to zero for the claim value.
2487     _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, i,
2488                                          HeapRegion::InitialClaimValue);
2489     jint regions_added = parKnownGarbageCl.marked_regions_added();
2490     _hrSorted->incNumMarkedHeapRegions(regions_added);
2491     if (G1PrintParCleanupStats) {
2492       gclog_or_tty->print_cr("     Thread %d called %d times, added %d regions to list.",
2493                  i, parKnownGarbageCl.invokes(), regions_added);
2494     }
2495   }
2496 };
2497 
2498 void
2499 G1CollectorPolicy_BestRegionsFirst::
2500 record_concurrent_mark_cleanup_end(size_t freed_bytes,
2501                                    size_t max_live_bytes) {
2502   double start;
2503   if (G1PrintParCleanupStats) start = os::elapsedTime();
2504   record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes);
2505 
2506   _collectionSetChooser->clearMarkedHeapRegions();
2507   double clear_marked_end;
2508   if (G1PrintParCleanupStats) {
2509     clear_marked_end = os::elapsedTime();
2510     gclog_or_tty->print_cr("  clear marked regions + work1: %8.3f ms.",
2511                   (clear_marked_end - start)*1000.0);
2512   }
2513   if (G1CollectedHeap::use_parallel_gc_threads()) {
2514     const size_t OverpartitionFactor = 4;
2515     const size_t MinWorkUnit = 8;
2516     const size_t WorkUnit =
2517       MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor),
2518            MinWorkUnit);
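    // Illustrative: 2048 regions and ParallelGCThreads = 8 give
    // WorkUnit = MAX2(2048 / 32, 8) = 64 regions per claimed chunk.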
2519     _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
2520                                                              WorkUnit);
2521     ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
2522                                             (int) WorkUnit);
2523     _g1->workers()->run_task(&parKnownGarbageTask);
2524 
2525     assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
2526            "sanity check");
2527   } else {
2528     KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
2529     _g1->heap_region_iterate(&knownGarbagecl);
2530   }
2531   double known_garbage_end;
2532   if (G1PrintParCleanupStats) {
2533     known_garbage_end = os::elapsedTime();
2534     gclog_or_tty->print_cr("  compute known garbage: %8.3f ms.",
2535                   (known_garbage_end - clear_marked_end)*1000.0);
2536   }
2537   _collectionSetChooser->sortMarkedHeapRegions();
2538   double sort_end;
2539   if (G1PrintParCleanupStats) {
2540     sort_end = os::elapsedTime();
2541     gclog_or_tty->print_cr("  sorting: %8.3f ms.",
2542                   (sort_end - known_garbage_end)*1000.0);
2543   }
2544 
2545   record_concurrent_mark_cleanup_end_work2();
2546   double work2_end;
2547   if (G1PrintParCleanupStats) {
2548     work2_end = os::elapsedTime();
2549     gclog_or_tty->print_cr("  work2: %8.3f ms.",
2550                   (work2_end - sort_end)*1000.0);
2551   }
2552 }
2553 
2554 // Add the heap region at the head of the non-incremental collection set
2555 void G1CollectorPolicy::
2556 add_to_collection_set(HeapRegion* hr) {
2557   assert(_inc_cset_build_state == Active, "Precondition");
2558   assert(!hr->is_young(), "non-incremental add of young region");
2559 
2560   if (_g1->mark_in_progress())
2561     _g1->concurrent_mark()->registerCSetRegion(hr);
2562 
2563   assert(!hr->in_collection_set(), "should not already be in the CSet");
2564   hr->set_in_collection_set(true);
2565   hr->set_next_in_collection_set(_collection_set);
2566   _collection_set = hr;
2567   _collection_set_size++;
2568   _collection_set_bytes_used_before += hr->used();
2569   _g1->register_region_with_in_cset_fast_test(hr);
2570 }
2571 
2572 // Initialize the per-collection-set information
2573 void G1CollectorPolicy::start_incremental_cset_building() {
2574   assert(_inc_cset_build_state == Inactive, "Precondition");
2575 
2576   _inc_cset_head = NULL;
2577   _inc_cset_tail = NULL;
2578   _inc_cset_size = 0;
2579   _inc_cset_bytes_used_before = 0;
2580 
2581   _inc_cset_young_index = 0;
2582 
2583   _inc_cset_max_finger = 0;
2584   _inc_cset_recorded_young_bytes = 0;
2585   _inc_cset_recorded_rs_lengths = 0;
2586   _inc_cset_predicted_elapsed_time_ms = 0;
2587   _inc_cset_predicted_bytes_to_copy = 0;
2588   _inc_cset_build_state = Active;
2589 }
2590 
2591 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
2592   // This routine is used when:
2593   // * adding survivor regions to the incremental cset at the end of an
2594   //   evacuation pause,
2595   // * adding the current allocation region to the incremental cset
2596   //   when it is retired, and
2597   // * updating existing policy information for a region in the
2598   //   incremental cset via young list RSet sampling.
2599   // Therefore this routine may be called at a safepoint by the
2600   // VM thread, or in-between safepoints by mutator threads (when
2601   // retiring the current allocation region) or a concurrent
2602   // refine thread (RSet sampling).
2603 
2604   double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
2605   size_t used_bytes = hr->used();
2606 
2607   _inc_cset_recorded_rs_lengths += rs_length;
2608   _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
2609 
2610   _inc_cset_bytes_used_before += used_bytes;
2611 
  // Cache the values we have added to the aggregated information
2613   // in the heap region in case we have to remove this region from
2614   // the incremental collection set, or it is updated by the
2615   // rset sampling code
2616   hr->set_recorded_rs_length(rs_length);
2617   hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
2618 
2619 #if PREDICTIONS_VERBOSE
2620   size_t bytes_to_copy = predict_bytes_to_copy(hr);
2621   _inc_cset_predicted_bytes_to_copy += bytes_to_copy;
2622 
2623   // Record the number of bytes used in this region
2624   _inc_cset_recorded_young_bytes += used_bytes;
2625 
  // Cache the values we have added to the aggregated information
2627   // in the heap region in case we have to remove this region from
2628   // the incremental collection set, or it is updated by the
2629   // rset sampling code
2630   hr->set_predicted_bytes_to_copy(bytes_to_copy);
2631 #endif // PREDICTIONS_VERBOSE
2632 }
2633 
2634 void G1CollectorPolicy::remove_from_incremental_cset_info(HeapRegion* hr) {
2635   // This routine is currently only called as part of the updating of
2636   // existing policy information for regions in the incremental cset that
2637   // is performed by the concurrent refine thread(s) as part of young list
2638   // RSet sampling. Therefore we should not be at a safepoint.
2639 
2640   assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
2641   assert(hr->is_young(), "it should be");
2642 
2643   size_t used_bytes = hr->used();
2644   size_t old_rs_length = hr->recorded_rs_length();
2645   double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
2646 
2647   // Subtract the old recorded/predicted policy information for
2648   // the given heap region from the collection set info.
2649   _inc_cset_recorded_rs_lengths -= old_rs_length;
2650   _inc_cset_predicted_elapsed_time_ms -= old_elapsed_time_ms;
2651 
2652   _inc_cset_bytes_used_before -= used_bytes;
2653 
2654   // Clear the values cached in the heap region
2655   hr->set_recorded_rs_length(0);
2656   hr->set_predicted_elapsed_time_ms(0);
2657 
2658 #if PREDICTIONS_VERBOSE
2659   size_t old_predicted_bytes_to_copy = hr->predicted_bytes_to_copy();
2660   _inc_cset_predicted_bytes_to_copy -= old_predicted_bytes_to_copy;
2661 
2662   // Subtract the number of bytes used in this region
2663   _inc_cset_recorded_young_bytes -= used_bytes;
2664 
2665   // Clear the values cached in the heap region
2666   hr->set_predicted_bytes_to_copy(0);
2667 #endif // PREDICTIONS_VERBOSE
2668 }
2669 
2670 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length) {
2671   // Update the collection set information that is dependent on the new RS length
2672   assert(hr->is_young(), "Precondition");
2673 
2674   remove_from_incremental_cset_info(hr);
2675   add_to_incremental_cset_info(hr, new_rs_length);
2676 }
2677 
2678 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
2679   assert( hr->is_young(), "invariant");
2680   assert( hr->young_index_in_cset() == -1, "invariant" );
2681   assert(_inc_cset_build_state == Active, "Precondition");
2682 
  // We need to clear and then set the cached recorded collection set
  // information in the heap region here (before the region gets added
2685   // to the collection set). An individual heap region's cached values
2686   // are calculated, aggregated with the policy collection set info,
2687   // and cached in the heap region here (initially) and (subsequently)
2688   // by the Young List sampling code.
2689 
2690   size_t rs_length = hr->rem_set()->occupied();
2691   add_to_incremental_cset_info(hr, rs_length);
2692 
2693   HeapWord* hr_end = hr->end();
2694   _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);
2695 
2696   assert(!hr->in_collection_set(), "invariant");
2697   hr->set_in_collection_set(true);
2698   assert( hr->next_in_collection_set() == NULL, "invariant");
2699 
2700   _inc_cset_size++;
2701   _g1->register_region_with_in_cset_fast_test(hr);
2702 
2703   hr->set_young_index_in_cset((int) _inc_cset_young_index);
2704   ++_inc_cset_young_index;
2705 }
2706 
2707 // Add the region at the RHS of the incremental cset
2708 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
2709   // We should only ever be appending survivors at the end of a pause
2710   assert(hr->is_survivor(), "Logic");
2711 
2712   // Do the 'common' stuff
2713   add_region_to_incremental_cset_common(hr);
2714 
2715   // Now add the region at the right hand side
2716   if (_inc_cset_tail == NULL) {
2717     assert(_inc_cset_head == NULL, "invariant");
2718     _inc_cset_head = hr;
2719   } else {
2720     _inc_cset_tail->set_next_in_collection_set(hr);
2721   }
2722   _inc_cset_tail = hr;
2723 }
2724 
2725 // Add the region to the LHS of the incremental cset
2726 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
2727   // Survivors should be added to the RHS at the end of a pause
2728   assert(!hr->is_survivor(), "Logic");
2729 
2730   // Do the 'common' stuff
2731   add_region_to_incremental_cset_common(hr);
2732 
2733   // Add the region at the left hand side
2734   hr->set_next_in_collection_set(_inc_cset_head);
2735   if (_inc_cset_head == NULL) {
2736     assert(_inc_cset_tail == NULL, "invariant");
2737     _inc_cset_tail = hr;
2738   }
2739   _inc_cset_head = hr;
2740 }
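
// Taken together, the two variants above maintain the incremental cset as
// a single linked list of the form
//
//   [newly allocated young regions (LHS)] ++ [retained survivors (RHS)]
//
// which matches the young list layout that choose_collection_set() below
// relies on.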
2741 
2742 #ifndef PRODUCT
2743 void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
2744   assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");
2745 
2746   st->print_cr("\nCollection_set:");
2747   HeapRegion* csr = list_head;
2748   while (csr != NULL) {
2749     HeapRegion* next = csr->next_in_collection_set();
2750     assert(csr->in_collection_set(), "bad CS");
2751     st->print_cr("  [" PTR_FORMAT "-" PTR_FORMAT "], t: " PTR_FORMAT
                 ", P: " PTR_FORMAT ", N: " PTR_FORMAT ", C: " PTR_FORMAT ", "
2752                  "age: %4d, y: %d, surv: %d",
2753                         csr->bottom(), csr->end(),
2754                         csr->top(),
2755                         csr->prev_top_at_mark_start(),
2756                         csr->next_top_at_mark_start(),
2757                         csr->top_at_conc_mark_count(),
2758                         csr->age_in_surv_rate_group_cond(),
2759                         csr->is_young(),
2760                         csr->is_survivor());
2761     csr = next;
2762   }
2763 }
2764 #endif // !PRODUCT
2765 
2766 void
2767 G1CollectorPolicy_BestRegionsFirst::choose_collection_set(
2768                                                   double target_pause_time_ms) {
2769   // Set this here - in case we're not doing young collections.
2770   double non_young_start_time_sec = os::elapsedTime();
2771 
2772   start_recording_regions();
2773 
2774   guarantee(target_pause_time_ms > 0.0,
2775             err_msg("target_pause_time_ms = %1.6lf should be positive",
2776                     target_pause_time_ms));
2777   guarantee(_collection_set == NULL, "Precondition");
2778 
2779   double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
2780   double predicted_pause_time_ms = base_time_ms;
2781 
2782   double time_remaining_ms = target_pause_time_ms - base_time_ms;
2783 
2784   // the 10% and 50% values are arbitrary...
2785   if (time_remaining_ms < 0.10 * target_pause_time_ms) {
2786     time_remaining_ms = 0.50 * target_pause_time_ms;
2787     _within_target = false;
2788   } else {
2789     _within_target = true;
2790   }
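
  // Worked example: with a 200ms target and a 190ms predicted base time,
  // the 10ms remaining is below the 10% threshold (20ms), so the budget
  // is reset to 0.50 * 200ms = 100ms and _within_target is set to false.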
2791 
2792   // We figure out the number of bytes available for future to-space.
2793   // For new regions without marking information, we must assume the
2794   // worst-case of complete survival.  If we have marking information for a
2795   // region, we can bound the amount of live data.  We can add a number of
2796   // such regions, as long as the sum of the live data bounds does not
2797   // exceed the available evacuation space.
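  //
  // For example (illustrative numbers): with a 1M region size and 100
  // free regions, max_live_bytes starts out at 100M and is then raised
  // below by however much the heap could still expand.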
2798   size_t max_live_bytes = _g1->free_regions() * HeapRegion::GrainBytes;
2799 
2800   size_t expansion_bytes =
2801     _g1->expansion_regions() * HeapRegion::GrainBytes;
2802 
2803   _collection_set_bytes_used_before = 0;
2804   _collection_set_size = 0;
2805 
2806   // Adjust for expansion and slop.
2807   max_live_bytes = max_live_bytes + expansion_bytes;
2808 
2809   HeapRegion* hr;
2810   double young_start_time_sec = os::elapsedTime();
2811 
2812   if (G1PolicyVerbose > 0) {
2813     gclog_or_tty->print_cr("Adding " SIZE_FORMAT " young regions to the CSet",
2814       _g1->young_list()->length());
2815   }
2816 
2817   _young_cset_length  = 0;
2818   _last_young_gc_full = full_young_gcs();
2819 
2820   if (_last_young_gc_full) {
2821     ++_full_young_pause_num;
2822   } else {
2823     ++_partial_young_pause_num;
2824   }

2825   // The young list is laid out with the survivor regions from the
2826   // previous pause appended to its RHS, i.e.
2827   //   [Newly Young Regions ++ Survivors from last pause].
2828 
2829   hr = _g1->young_list()->first_survivor_region();
2830   while (hr != NULL) {
2831     assert(hr->is_survivor(), "badly formed young list");
2832     hr->set_young();
2833     hr = hr->get_next_young_region();
2834   }
2835 
2836   // Clear the fields that point to the survivor list - they are
2837   // all young now.
2838   _g1->young_list()->clear_survivors();
2839 
2840   if (_g1->mark_in_progress()) {
2841     _g1->concurrent_mark()->register_collection_set_finger(_inc_cset_max_finger);
2842   }

2843   _young_cset_length = _inc_cset_young_index;
2844   _collection_set = _inc_cset_head;
2845   _collection_set_size = _inc_cset_size;
2846   _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
2847 
2848   // For young regions in the collection set, we assume the worst
2849   // case of complete survival
2850   max_live_bytes -= _inc_cset_size * HeapRegion::GrainBytes;
2851 
2852   time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
2853   predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;
2854 
2855   // The number of recorded young regions is the incremental
2856   // collection set's current size
2857   set_recorded_young_regions(_inc_cset_size);
2858   set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
2859   set_recorded_young_bytes(_inc_cset_recorded_young_bytes);
2860 #if PREDICTIONS_VERBOSE
2861   set_predicted_bytes_to_copy(_inc_cset_predicted_bytes_to_copy);
2862 #endif // PREDICTIONS_VERBOSE
2863 
2864   if (G1PolicyVerbose > 0) {
2865     gclog_or_tty->print_cr("  Added " SIZE_FORMAT " Young Regions to CS.",
2866       _inc_cset_size);
2867     gclog_or_tty->print_cr("    (" SIZE_FORMAT " KB left in heap.)",
2868       max_live_bytes/K);
2869   }
2870 
2871   assert(_inc_cset_size == _g1->young_list()->length(), "Invariant");
2872 
2873   double young_end_time_sec = os::elapsedTime();
2874   _recorded_young_cset_choice_time_ms =
2875     (young_end_time_sec - young_start_time_sec) * 1000.0;
2876 
2877   // We are doing young collections so reset this.
2878   non_young_start_time_sec = young_end_time_sec;
2879 
2880   // Note we can use either _collection_set_size or
2881   // _young_cset_length here
2882   if (_collection_set_size > 0 && _last_young_gc_full) {
2883     // don't bother adding more regions...
2884     goto choose_collection_set_end;
2885   }
2886 
2887   if (!full_young_gcs()) {
2888     bool should_continue = true;
2889     NumberSeq seq;
2890     double avg_prediction = 100000000000000000.0; // something very large
2891 
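    // Partially-young pause: repeatedly ask the CollectionSetChooser for
    // the next marked (non-young) region, stopping when the chooser
    // returns NULL (no affordable candidate), the time budget is spent
    // (adaptive young list sizing), or the cset reaches its fixed length.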
2892     do {
2893       hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms,
2894                                                       avg_prediction);
2895       if (hr != NULL) {
2896         double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
2897         time_remaining_ms -= predicted_time_ms;
2898         predicted_pause_time_ms += predicted_time_ms;
2899         add_to_collection_set(hr);
2900         record_non_young_cset_region(hr);
2901         max_live_bytes -= MIN2(hr->max_live_bytes(), max_live_bytes);
2902         if (G1PolicyVerbose > 0) {
2903           gclog_or_tty->print_cr("    (" SIZE_FORMAT " KB left in heap.)",
2904                         max_live_bytes/K);
2905         }
2906         seq.add(predicted_time_ms);
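        // Threshold handed to getNextMarkedRegion() on the next
        // iteration: the mean predicted region cost so far plus one
        // standard deviation.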
2907         avg_prediction = seq.avg() + seq.sd();
2908       }
2909       should_continue =
2910         ( hr != NULL) &&
2911         ( (adaptive_young_list_length()) ? time_remaining_ms > 0.0
2912           : _collection_set_size < _young_list_fixed_length );
2913     } while (should_continue);
2914 
2915     if (!adaptive_young_list_length() &&
2916         _collection_set_size < _young_list_fixed_length) {
2917       _should_revert_to_full_young_gcs = true;
    }
2918   }
2919 
2920 choose_collection_set_end:
2921   stop_incremental_cset_building();
2922 
2923   count_CS_bytes_used();
2924 
2925   end_recording_regions();
2926 
2927   double non_young_end_time_sec = os::elapsedTime();
2928   _recorded_non_young_cset_choice_time_ms =
2929     (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
2930 }
2931 
2932 void G1CollectorPolicy_BestRegionsFirst::record_full_collection_end() {
2933   G1CollectorPolicy::record_full_collection_end();
2934   _collectionSetChooser->updateAfterFullCollection();
2935 }
2936 
2937 void G1CollectorPolicy_BestRegionsFirst::
2938 expand_if_possible(size_t numRegions) {
2939   size_t expansion_bytes = numRegions * HeapRegion::GrainBytes;
2940   _g1->expand(expansion_bytes);
2941 }
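
// Note: the expansion is best effort -- _g1->expand() may not be able to
// commit the requested memory, hence the "if_possible" in the name.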
2942 
2943 void G1CollectorPolicy_BestRegionsFirst::
2944 record_collection_pause_end() {
2945   G1CollectorPolicy::record_collection_pause_end();
2946   assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
2947 }