1 /*
   2  * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/g1/concurrentG1Refine.hpp"
  27 #include "gc_implementation/g1/concurrentMark.hpp"
  28 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
  29 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  30 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  31 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
  32 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  33 #include "gc_implementation/shared/gcPolicyCounters.hpp"
  34 #include "runtime/arguments.hpp"
  35 #include "runtime/java.hpp"
  36 #include "runtime/mutexLocker.hpp"
  37 #include "utilities/debug.hpp"
  38 
  39 #define PREDICTIONS_VERBOSE 0
  40 
  41 // <NEW PREDICTION>
  42 
  43 // Different defaults for different number of GC threads
  44 // They were chosen by running GCOld and SPECjbb on debris with different
  45 //   numbers of GC threads and choosing them based on the results
  46 
  47 // all the same
  48 static double rs_length_diff_defaults[] = {
  49   0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
  50 };
  51 
  52 static double cost_per_card_ms_defaults[] = {
  53   0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
  54 };
  55 
  56 // all the same
  57 static double fully_young_cards_per_entry_ratio_defaults[] = {
  58   1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
  59 };
  60 
  61 static double cost_per_entry_ms_defaults[] = {
  62   0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
  63 };
  64 
  65 static double cost_per_byte_ms_defaults[] = {
  66   0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
  67 };
  68 
  69 // these should be pretty consistent
  70 static double constant_other_time_ms_defaults[] = {
  71   5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
  72 };
  73 
  74 
  75 static double young_other_cost_per_region_ms_defaults[] = {
  76   0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
  77 };
  78 
  79 static double non_young_other_cost_per_region_ms_defaults[] = {
  80   1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
  81 };
  82 
  83 // </NEW PREDICTION>
  84 
  85 // Help class for avoiding interleaved logging
  86 class LineBuffer: public StackObj {
  87 
  88 private:
  89   static const int BUFFER_LEN = 1024;
  90   static const int INDENT_CHARS = 3;
  91   char _buffer[BUFFER_LEN];
  92   int _indent_level;
  93   int _cur;
  94 
  95   void vappend(const char* format, va_list ap) {
  96     int res = vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap);
  97     if (res != -1) {
  98       _cur += res;
  99     } else {
 100       DEBUG_ONLY(warning("buffer too small in LineBuffer");)
 101       _buffer[BUFFER_LEN -1] = 0;
 102       _cur = BUFFER_LEN; // vsnprintf above should not add to _buffer if we are called again
 103     }
 104   }
 105 
 106 public:
 107   explicit LineBuffer(int indent_level): _indent_level(indent_level), _cur(0) {
 108     for (; (_cur < BUFFER_LEN && _cur < (_indent_level * INDENT_CHARS)); _cur++) {
 109       _buffer[_cur] = ' ';
 110     }
 111   }
 112 
 113 #ifndef PRODUCT
 114   ~LineBuffer() {
 115     assert(_cur == _indent_level * INDENT_CHARS, "pending data in buffer - append_and_print_cr() not called?");
 116   }
 117 #endif
 118 
 119   void append(const char* format, ...) {
 120     va_list ap;
 121     va_start(ap, format);
 122     vappend(format, ap);
 123     va_end(ap);
 124   }
 125 
 126   void append_and_print_cr(const char* format, ...) {
 127     va_list ap;
 128     va_start(ap, format);
 129     vappend(format, ap);
 130     va_end(ap);
 131     gclog_or_tty->print_cr("%s", _buffer);
 132     _cur = _indent_level * INDENT_CHARS;
 133   }
 134 };
 135 
 136 G1CollectorPolicy::G1CollectorPolicy() :
 137   _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
 138                         ? ParallelGCThreads : 1),
 139 
 140   _n_pauses(0),
 141   _recent_rs_scan_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
 142   _recent_pause_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
 143   _recent_rs_sizes(new TruncatedSeq(NumPrevPausesForHeuristics)),
 144   _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
 145   _all_pause_times_ms(new NumberSeq()),
 146   _stop_world_start(0.0),
 147   _all_stop_world_times_ms(new NumberSeq()),
 148   _all_yield_times_ms(new NumberSeq()),
 149   _using_new_ratio_calculations(false),
 150 
 151   _all_mod_union_times_ms(new NumberSeq()),
 152 
 153   _summary(new Summary()),
 154 
 155 #ifndef PRODUCT
 156   _cur_clear_ct_time_ms(0.0),
 157   _min_clear_cc_time_ms(-1.0),
 158   _max_clear_cc_time_ms(-1.0),
 159   _cur_clear_cc_time_ms(0.0),
 160   _cum_clear_cc_time_ms(0.0),
 161   _num_cc_clears(0L),
 162 #endif
 163 
 164   _region_num_young(0),
 165   _region_num_tenured(0),
 166   _prev_region_num_young(0),
 167   _prev_region_num_tenured(0),
 168 
 169   _aux_num(10),
 170   _all_aux_times_ms(new NumberSeq[_aux_num]),
 171   _cur_aux_start_times_ms(new double[_aux_num]),
 172   _cur_aux_times_ms(new double[_aux_num]),
 173   _cur_aux_times_set(new bool[_aux_num]),
 174 
 175   _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
 176   _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
 177 
 178   // <NEW PREDICTION>
 179 
 180   _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 181   _prev_collection_pause_end_ms(0.0),
 182   _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
 183   _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
 184   _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 185   _fully_young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
 186   _partially_young_cards_per_entry_ratio_seq(
 187                                          new TruncatedSeq(TruncatedSeqLength)),
 188   _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 189   _partially_young_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 190   _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 191   _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
 192   _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 193   _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 194   _non_young_other_cost_per_region_ms_seq(
 195                                          new TruncatedSeq(TruncatedSeqLength)),
 196 
 197   _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
 198   _scanned_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
 199   _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
 200 
 201   _pause_time_target_ms((double) MaxGCPauseMillis),
 202 
 203   // </NEW PREDICTION>
 204 
 205   _full_young_gcs(true),
 206   _full_young_pause_num(0),
 207   _partial_young_pause_num(0),
 208 
 209   _during_marking(false),
 210   _in_marking_window(false),
 211   _in_marking_window_im(false),
 212 
 213   _known_garbage_ratio(0.0),
 214   _known_garbage_bytes(0),
 215 
 216   _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),
 217 
 218    _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)),
 219 
 220   _recent_CS_bytes_used_before(new TruncatedSeq(NumPrevPausesForHeuristics)),
 221   _recent_CS_bytes_surviving(new TruncatedSeq(NumPrevPausesForHeuristics)),
 222 
 223   _recent_avg_pause_time_ratio(0.0),
 224   _num_markings(0),
 225   _n_marks(0),
 226   _n_pauses_at_mark_end(0),
 227 
 228   _all_full_gc_times_ms(new NumberSeq()),
 229 
 230   // G1PausesBtwnConcMark defaults to -1
 231   // so the hack is to do the cast  QQQ FIXME
 232   _pauses_btwn_concurrent_mark((size_t)G1PausesBtwnConcMark),
 233   _n_marks_since_last_pause(0),
 234   _initiate_conc_mark_if_possible(false),
 235   _during_initial_mark_pause(false),
 236   _should_revert_to_full_young_gcs(false),
 237   _last_full_young_gc(false),
 238 
 239   _eden_bytes_before_gc(0),
 240   _survivor_bytes_before_gc(0),
 241   _capacity_before_gc(0),
 242 
 243   _prev_collection_pause_used_at_end_bytes(0),
 244 
 245   _collection_set(NULL),
 246   _collection_set_size(0),
 247   _collection_set_bytes_used_before(0),
 248 
 249   // Incremental CSet attributes
 250   _inc_cset_build_state(Inactive),
 251   _inc_cset_head(NULL),
 252   _inc_cset_tail(NULL),
 253   _inc_cset_size(0),
 254   _inc_cset_young_index(0),
 255   _inc_cset_bytes_used_before(0),
 256   _inc_cset_max_finger(NULL),
 257   _inc_cset_recorded_young_bytes(0),
 258   _inc_cset_recorded_rs_lengths(0),
 259   _inc_cset_predicted_elapsed_time_ms(0.0),
 260   _inc_cset_predicted_bytes_to_copy(0),
 261 
 262 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
 263 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 264 #endif // _MSC_VER
 265 
 266   _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
 267                                                  G1YoungSurvRateNumRegionsSummary)),
 268   _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
 269                                               G1YoungSurvRateNumRegionsSummary)),
 270   // add here any more surv rate groups
 271   _recorded_survivor_regions(0),
 272   _recorded_survivor_head(NULL),
 273   _recorded_survivor_tail(NULL),
 274   _survivors_age_table(true),
 275 
 276   _gc_overhead_perc(0.0) {
 277 
 278   // Set up the region size and associated fields. Given that the
 279   // policy is created before the heap, we have to set this up here,
 280   // so it's done as soon as possible.
 281   HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
 282   HeapRegionRemSet::setup_remset_size();
 283 
 284   G1ErgoVerbose::initialize();
 285   if (PrintAdaptiveSizePolicy) {
 286     // Currently, we only use a single switch for all the heuristics.
 287     G1ErgoVerbose::set_enabled(true);
 288     // Given that we don't currently have a verboseness level
 289     // parameter, we'll hardcode this to high. This can be easily
 290     // changed in the future.
 291     G1ErgoVerbose::set_level(ErgoHigh);
 292   } else {
 293     G1ErgoVerbose::set_enabled(false);
 294   }
 295 
 296   // Verify PLAB sizes
 297   const uint region_size = HeapRegion::GrainWords;
 298   if (YoungPLABSize > region_size || OldPLABSize > region_size) {
 299     char buffer[128];
 300     jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most %u",
 301                  OldPLABSize > region_size ? "Old" : "Young", region_size);
 302     vm_exit_during_initialization(buffer);
 303   }
 304 
 305   _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
 306   _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
 307 
 308   _par_last_gc_worker_start_times_ms = new double[_parallel_gc_threads];
 309   _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads];
 310   _par_last_mark_stack_scan_times_ms = new double[_parallel_gc_threads];
 311 
 312   _par_last_update_rs_times_ms = new double[_parallel_gc_threads];
 313   _par_last_update_rs_processed_buffers = new double[_parallel_gc_threads];
 314 
 315   _par_last_scan_rs_times_ms = new double[_parallel_gc_threads];
 316 
 317   _par_last_obj_copy_times_ms = new double[_parallel_gc_threads];
 318 
 319   _par_last_termination_times_ms = new double[_parallel_gc_threads];
 320   _par_last_termination_attempts = new double[_parallel_gc_threads];
 321   _par_last_gc_worker_end_times_ms = new double[_parallel_gc_threads];
 322   _par_last_gc_worker_times_ms = new double[_parallel_gc_threads];
 323 
 324   // start conservatively
 325   _expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis;
 326 
 327   // <NEW PREDICTION>
 328 
 329   int index;
 330   if (ParallelGCThreads == 0)
 331     index = 0;
 332   else if (ParallelGCThreads > 8)
 333     index = 7;
 334   else
 335     index = ParallelGCThreads - 1;
 336 
 337   _pending_card_diff_seq->add(0.0);
 338   _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
 339   _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
 340   _fully_young_cards_per_entry_ratio_seq->add(
 341                             fully_young_cards_per_entry_ratio_defaults[index]);
 342   _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
 343   _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
 344   _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
 345   _young_other_cost_per_region_ms_seq->add(
 346                                young_other_cost_per_region_ms_defaults[index]);
 347   _non_young_other_cost_per_region_ms_seq->add(
 348                            non_young_other_cost_per_region_ms_defaults[index]);
 349 
 350   // </NEW PREDICTION>
 351 
 352   // Below, we might need to calculate the pause time target based on
 353   // the pause interval. When we do so we are going to give G1 maximum
 354   // flexibility and allow it to do pauses when it needs to. So, we'll
 355   // arrange that the pause interval to be pause time target + 1 to
 356   // ensure that a) the pause time target is maximized with respect to
 357   // the pause interval and b) we maintain the invariant that pause
 358   // time target < pause interval. If the user does not want this
 359   // maximum flexibility, they will have to set the pause interval
 360   // explicitly.
 361 
 362   // First make sure that, if either parameter is set, its value is
 363   // reasonable.
 364   if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
 365     if (MaxGCPauseMillis < 1) {
 366       vm_exit_during_initialization("MaxGCPauseMillis should be "
 367                                     "greater than 0");
 368     }
 369   }
 370   if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
 371     if (GCPauseIntervalMillis < 1) {
 372       vm_exit_during_initialization("GCPauseIntervalMillis should be "
 373                                     "greater than 0");
 374     }
 375   }
 376 
 377   // Then, if the pause time target parameter was not set, set it to
 378   // the default value.
 379   if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
 380     if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
 381       // The default pause time target in G1 is 200ms
 382       FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
 383     } else {
 384       // We do not allow the pause interval to be set without the
 385       // pause time target
 386       vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
 387                                     "without setting MaxGCPauseMillis");
 388     }
 389   }
 390 
 391   // Then, if the interval parameter was not set, set it according to
 392   // the pause time target (this will also deal with the case when the
 393   // pause time target is the default value).
 394   if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
 395     FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
 396   }
 397 
 398   // Finally, make sure that the two parameters are consistent.
 399   if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
 400     char buffer[256];
 401     jio_snprintf(buffer, 256,
 402                  "MaxGCPauseMillis (%u) should be less than "
 403                  "GCPauseIntervalMillis (%u)",
 404                  MaxGCPauseMillis, GCPauseIntervalMillis);
 405     vm_exit_during_initialization(buffer);
 406   }
 407 
 408   double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
 409   double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
 410   _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
 411   _sigma = (double) G1ConfidencePercent / 100.0;
 412 
 413   // start conservatively (around 50ms is about right)
 414   _concurrent_mark_remark_times_ms->add(0.05);
 415   _concurrent_mark_cleanup_times_ms->add(0.20);
 416   _tenuring_threshold = MaxTenuringThreshold;
 417   // _max_survivor_regions will be calculated by
 418   // update_young_list_target_length() during initialization.
 419   _max_survivor_regions = 0;
 420 
 421   assert(GCTimeRatio > 0,
 422          "we should have set it to a default value set_g1_gc_flags() "
 423          "if a user set it to 0");
 424   _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
 425 
 426   uintx reserve_perc = G1ReservePercent;
 427   // Put an artificial ceiling on this so that it's not set to a silly value.
 428   if (reserve_perc > 50) {
 429     reserve_perc = 50;
 430     warning("G1ReservePercent is set to a value that is too large, "
 431             "it's been updated to %u", reserve_perc);
 432   }
 433   _reserve_factor = (double) reserve_perc / 100.0;
 434   // This will be set when the heap is expanded
 435   // for the first time during initialization.
 436   _reserve_regions = 0;
 437 
 438   initialize_all();
 439 }
 440 
 441 // Increment "i", mod "len"
 442 static void inc_mod(int& i, int len) {
 443   i++; if (i == len) i = 0;
 444 }
 445 
 446 void G1CollectorPolicy::initialize_flags() {
 447   set_min_alignment(HeapRegion::GrainBytes);
 448   set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
 449   if (SurvivorRatio < 1) {
 450     vm_exit_during_initialization("Invalid survivor ratio specified");
 451   }
 452   CollectorPolicy::initialize_flags();
 453 }
 454 
 455 // The easiest way to deal with the parsing of the NewSize /
 456 // MaxNewSize / etc. parameteres is to re-use the code in the
 457 // TwoGenerationCollectorPolicy class. This is similar to what
 458 // ParallelScavenge does with its GenerationSizer class (see
 459 // ParallelScavengeHeap::initialize()). We might change this in the
 460 // future, but it's a good start.
 461 class G1YoungGenSizer : public TwoGenerationCollectorPolicy {
 462 
 463 public:
 464   G1YoungGenSizer() {
 465     initialize_flags();
 466     initialize_size_info();
 467   }
 468   size_t size_to_region_num(size_t byte_size) {
 469     return MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes);
 470   }
 471   size_t min_young_region_num() {
 472     return size_to_region_num(_min_gen0_size);
 473   }
 474   size_t initial_young_region_num() {
 475     return size_to_region_num(_initial_gen0_size);
 476   }
 477   size_t max_young_region_num() {
 478     return size_to_region_num(_max_gen0_size);
 479   }
 480 };
 481 
 482 void G1CollectorPolicy::update_young_list_size_using_newratio(size_t number_of_heap_regions) {
 483   assert(number_of_heap_regions > 0, "Heap must be initialized");
 484   size_t young_size = number_of_heap_regions / (NewRatio + 1);
 485   _min_desired_young_length = young_size;
 486   _max_desired_young_length = young_size;
 487 }
 488 
 489 void G1CollectorPolicy::init() {
 490   // Set aside an initial future to_space.
 491   _g1 = G1CollectedHeap::heap();
 492 
 493   assert(Heap_lock->owned_by_self(), "Locking discipline.");
 494 
 495   initialize_gc_policy_counters();
 496 
 497   G1YoungGenSizer sizer;
 498   size_t initial_region_num = sizer.initial_young_region_num();
 499   _min_desired_young_length = sizer.min_young_region_num();
 500   _max_desired_young_length = sizer.max_young_region_num();
 501 
 502   if (FLAG_IS_CMDLINE(NewRatio)) {
 503     if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
 504       gclog_or_tty->print_cr("-XX:NewSize and -XX:MaxNewSize overrides -XX:NewRatio");
 505     } else {
 506       // Treat NewRatio as a fixed size that is only recalculated when the heap size changes
 507       size_t heap_regions = sizer.size_to_region_num(_g1->n_regions());
 508       update_young_list_size_using_newratio(heap_regions);
 509       _using_new_ratio_calculations = true;
 510     }
 511   }
 512 
 513   // GenCollectorPolicy guarantees that min <= initial <= max.
 514   // Asserting here just to state that we rely on this property.
 515   assert(_min_desired_young_length <= _max_desired_young_length, "Invalid min/max young gen size values");
 516   assert(initial_region_num <= _max_desired_young_length, "Initial young gen size too large");
 517   assert(_min_desired_young_length <= initial_region_num, "Initial young gen size too small");
 518 
 519   set_adaptive_young_list_length(_min_desired_young_length < _max_desired_young_length);
 520   if (adaptive_young_list_length()) {
 521     _young_list_fixed_length = 0;
 522   } else {
 523     _young_list_fixed_length = initial_region_num;
 524   }
 525   _free_regions_at_end_of_collection = _g1->free_regions();
 526   update_young_list_target_length();
 527   _prev_eden_capacity = _young_list_target_length * HeapRegion::GrainBytes;
 528 
 529   // We may immediately start allocating regions and placing them on the
 530   // collection set list. Initialize the per-collection set info
 531   start_incremental_cset_building();
 532 }
 533 
 534 // Create the jstat counters for the policy.
 535 void G1CollectorPolicy::initialize_gc_policy_counters() {
 536   _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
 537 }
 538 
 539 bool G1CollectorPolicy::predict_will_fit(size_t young_length,
 540                                          double base_time_ms,
 541                                          size_t base_free_regions,
 542                                          double target_pause_time_ms) {
 543   if (young_length >= base_free_regions) {
 544     // end condition 1: not enough space for the young regions
 545     return false;
 546   }
 547 
 548   double accum_surv_rate = accum_yg_surv_rate_pred((int)(young_length - 1));
 549   size_t bytes_to_copy =
 550                (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
 551   double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
 552   double young_other_time_ms = predict_young_other_time_ms(young_length);
 553   double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
 554   if (pause_time_ms > target_pause_time_ms) {
 555     // end condition 2: prediction is over the target pause time
 556     return false;
 557   }
 558 
 559   size_t free_bytes =
 560                   (base_free_regions - young_length) * HeapRegion::GrainBytes;
 561   if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {
 562     // end condition 3: out-of-space (conservatively!)
 563     return false;
 564   }
 565 
 566   // success!
 567   return true;
 568 }
 569 
 570 void G1CollectorPolicy::record_new_heap_size(size_t new_number_of_regions) {
 571   // re-calculate the necessary reserve
 572   double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
 573   // We use ceiling so that if reserve_regions_d is > 0.0 (but
 574   // smaller than 1.0) we'll get 1.
 575   _reserve_regions = (size_t) ceil(reserve_regions_d);
 576 
 577   if (_using_new_ratio_calculations) {
 578     // -XX:NewRatio was specified so we need to update the
 579     // young gen length when the heap size has changed.
 580     update_young_list_size_using_newratio(new_number_of_regions);
 581   }
 582 }
 583 
 584 size_t G1CollectorPolicy::calculate_young_list_desired_min_length(
 585                                                      size_t base_min_length) {
 586   size_t desired_min_length = 0;
 587   if (adaptive_young_list_length()) {
 588     if (_alloc_rate_ms_seq->num() > 3) {
 589       double now_sec = os::elapsedTime();
 590       double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
 591       double alloc_rate_ms = predict_alloc_rate_ms();
 592       desired_min_length = (size_t) ceil(alloc_rate_ms * when_ms);
 593     } else {
 594       // otherwise we don't have enough info to make the prediction
 595     }
 596   }
 597   desired_min_length += base_min_length;
 598   // make sure we don't go below any user-defined minimum bound
 599   return MAX2(_min_desired_young_length, desired_min_length);
 600 }
 601 
 602 size_t G1CollectorPolicy::calculate_young_list_desired_max_length() {
 603   // Here, we might want to also take into account any additional
 604   // constraints (i.e., user-defined minimum bound). Currently, we
 605   // effectively don't set this bound.
 606   return _max_desired_young_length;
 607 }
 608 
 609 void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
 610   if (rs_lengths == (size_t) -1) {
 611     // if it's set to the default value (-1), we should predict it;
 612     // otherwise, use the given value.
 613     rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
 614   }
 615 
 616   // Calculate the absolute and desired min bounds.
 617 
 618   // This is how many young regions we already have (currently: the survivors).
 619   size_t base_min_length = recorded_survivor_regions();
 620   // This is the absolute minimum young length, which ensures that we
 621   // can allocate one eden region in the worst-case.
 622   size_t absolute_min_length = base_min_length + 1;
 623   size_t desired_min_length =
 624                      calculate_young_list_desired_min_length(base_min_length);
 625   if (desired_min_length < absolute_min_length) {
 626     desired_min_length = absolute_min_length;
 627   }
 628 
 629   // Calculate the absolute and desired max bounds.
 630 
 631   // We will try our best not to "eat" into the reserve.
 632   size_t absolute_max_length = 0;
 633   if (_free_regions_at_end_of_collection > _reserve_regions) {
 634     absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
 635   }
 636   size_t desired_max_length = calculate_young_list_desired_max_length();
 637   if (desired_max_length > absolute_max_length) {
 638     desired_max_length = absolute_max_length;
 639   }
 640 
 641   size_t young_list_target_length = 0;
 642   if (adaptive_young_list_length()) {
 643     if (full_young_gcs()) {
 644       young_list_target_length =
 645                         calculate_young_list_target_length(rs_lengths,
 646                                                            base_min_length,
 647                                                            desired_min_length,
 648                                                            desired_max_length);
 649       _rs_lengths_prediction = rs_lengths;
 650     } else {
 651       // Don't calculate anything and let the code below bound it to
 652       // the desired_min_length, i.e., do the next GC as soon as
 653       // possible to maximize how many old regions we can add to it.
 654     }
 655   } else {
 656     if (full_young_gcs()) {
 657       young_list_target_length = _young_list_fixed_length;
 658     } else {
 659       // A bit arbitrary: during partially-young GCs we allocate half
 660       // the young regions to try to add old regions to the CSet.
 661       young_list_target_length = _young_list_fixed_length / 2;
 662       // We choose to accept that we might go under the desired min
 663       // length given that we intentionally ask for a smaller young gen.
 664       desired_min_length = absolute_min_length;
 665     }
 666   }
 667 
 668   // Make sure we don't go over the desired max length, nor under the
 669   // desired min length. In case they clash, desired_min_length wins
 670   // which is why that test is second.
 671   if (young_list_target_length > desired_max_length) {
 672     young_list_target_length = desired_max_length;
 673   }
 674   if (young_list_target_length < desired_min_length) {
 675     young_list_target_length = desired_min_length;
 676   }
 677 
 678   assert(young_list_target_length > recorded_survivor_regions(),
 679          "we should be able to allocate at least one eden region");
 680   assert(young_list_target_length >= absolute_min_length, "post-condition");
 681   _young_list_target_length = young_list_target_length;
 682 
 683   update_max_gc_locker_expansion();
 684 }
 685 
 686 size_t
 687 G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
 688                                                    size_t base_min_length,
 689                                                    size_t desired_min_length,
 690                                                    size_t desired_max_length) {
 691   assert(adaptive_young_list_length(), "pre-condition");
 692   assert(full_young_gcs(), "only call this for fully-young GCs");
 693 
 694   // In case some edge-condition makes the desired max length too small...
 695   if (desired_max_length <= desired_min_length) {
 696     return desired_min_length;
 697   }
 698 
 699   // We'll adjust min_young_length and max_young_length not to include
 700   // the already allocated young regions (i.e., so they reflect the
 701   // min and max eden regions we'll allocate). The base_min_length
 702   // will be reflected in the predictions by the
 703   // survivor_regions_evac_time prediction.
 704   assert(desired_min_length > base_min_length, "invariant");
 705   size_t min_young_length = desired_min_length - base_min_length;
 706   assert(desired_max_length > base_min_length, "invariant");
 707   size_t max_young_length = desired_max_length - base_min_length;
 708 
 709   double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
 710   double survivor_regions_evac_time = predict_survivor_regions_evac_time();
 711   size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
 712   size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
 713   size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
 714   double base_time_ms =
 715     predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
 716     survivor_regions_evac_time;
 717   size_t available_free_regions = _free_regions_at_end_of_collection;
 718   size_t base_free_regions = 0;
 719   if (available_free_regions > _reserve_regions) {
 720     base_free_regions = available_free_regions - _reserve_regions;
 721   }
 722 
 723   // Here, we will make sure that the shortest young length that
 724   // makes sense fits within the target pause time.
 725 
 726   if (predict_will_fit(min_young_length, base_time_ms,
 727                        base_free_regions, target_pause_time_ms)) {
 728     // The shortest young length will fit into the target pause time;
 729     // we'll now check whether the absolute maximum number of young
 730     // regions will fit in the target pause time. If not, we'll do
 731     // a binary search between min_young_length and max_young_length.
 732     if (predict_will_fit(max_young_length, base_time_ms,
 733                          base_free_regions, target_pause_time_ms)) {
 734       // The maximum young length will fit into the target pause time.
 735       // We are done so set min young length to the maximum length (as
 736       // the result is assumed to be returned in min_young_length).
 737       min_young_length = max_young_length;
 738     } else {
 739       // The maximum possible number of young regions will not fit within
 740       // the target pause time so we'll search for the optimal
 741       // length. The loop invariants are:
 742       //
 743       // min_young_length < max_young_length
 744       // min_young_length is known to fit into the target pause time
 745       // max_young_length is known not to fit into the target pause time
 746       //
 747       // Going into the loop we know the above hold as we've just
 748       // checked them. Every time around the loop we check whether
 749       // the middle value between min_young_length and
 750       // max_young_length fits into the target pause time. If it
 751       // does, it becomes the new min. If it doesn't, it becomes
 752       // the new max. This way we maintain the loop invariants.
 753 
 754       assert(min_young_length < max_young_length, "invariant");
 755       size_t diff = (max_young_length - min_young_length) / 2;
 756       while (diff > 0) {
 757         size_t young_length = min_young_length + diff;
 758         if (predict_will_fit(young_length, base_time_ms,
 759                              base_free_regions, target_pause_time_ms)) {
 760           min_young_length = young_length;
 761         } else {
 762           max_young_length = young_length;
 763         }
 764         assert(min_young_length <  max_young_length, "invariant");
 765         diff = (max_young_length - min_young_length) / 2;
 766       }
 767       // The results is min_young_length which, according to the
 768       // loop invariants, should fit within the target pause time.
 769 
 770       // These are the post-conditions of the binary search above:
 771       assert(min_young_length < max_young_length,
 772              "otherwise we should have discovered that max_young_length "
 773              "fits into the pause target and not done the binary search");
 774       assert(predict_will_fit(min_young_length, base_time_ms,
 775                               base_free_regions, target_pause_time_ms),
 776              "min_young_length, the result of the binary search, should "
 777              "fit into the pause target");
 778       assert(!predict_will_fit(min_young_length + 1, base_time_ms,
 779                                base_free_regions, target_pause_time_ms),
 780              "min_young_length, the result of the binary search, should be "
 781              "optimal, so no larger length should fit into the pause target");
 782     }
 783   } else {
 784     // Even the minimum length doesn't fit into the pause time
 785     // target, return it as the result nevertheless.
 786   }
 787   return base_min_length + min_young_length;
 788 }
 789 
 790 double G1CollectorPolicy::predict_survivor_regions_evac_time() {
 791   double survivor_regions_evac_time = 0.0;
 792   for (HeapRegion * r = _recorded_survivor_head;
 793        r != NULL && r != _recorded_survivor_tail->get_next_young_region();
 794        r = r->get_next_young_region()) {
 795     survivor_regions_evac_time += predict_region_elapsed_time_ms(r, true);
 796   }
 797   return survivor_regions_evac_time;
 798 }
 799 
 800 void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
 801   guarantee( adaptive_young_list_length(), "should not call this otherwise" );
 802 
 803   size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
 804   if (rs_lengths > _rs_lengths_prediction) {
 805     // add 10% to avoid having to recalculate often
 806     size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
 807     update_young_list_target_length(rs_lengths_prediction);
 808   }
 809 }
 810 
 811 
 812 
 813 HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
 814                                                bool is_tlab,
 815                                                bool* gc_overhead_limit_was_exceeded) {
 816   guarantee(false, "Not using this policy feature yet.");
 817   return NULL;
 818 }
 819 
 820 // This method controls how a collector handles one or more
 821 // of its generations being fully allocated.
 822 HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
 823                                                        bool is_tlab) {
 824   guarantee(false, "Not using this policy feature yet.");
 825   return NULL;
 826 }
 827 
 828 
 829 #ifndef PRODUCT
 830 bool G1CollectorPolicy::verify_young_ages() {
 831   HeapRegion* head = _g1->young_list()->first_region();
 832   return
 833     verify_young_ages(head, _short_lived_surv_rate_group);
 834   // also call verify_young_ages on any additional surv rate groups
 835 }
 836 
 837 bool
 838 G1CollectorPolicy::verify_young_ages(HeapRegion* head,
 839                                      SurvRateGroup *surv_rate_group) {
 840   guarantee( surv_rate_group != NULL, "pre-condition" );
 841 
 842   const char* name = surv_rate_group->name();
 843   bool ret = true;
 844   int prev_age = -1;
 845 
 846   for (HeapRegion* curr = head;
 847        curr != NULL;
 848        curr = curr->get_next_young_region()) {
 849     SurvRateGroup* group = curr->surv_rate_group();
 850     if (group == NULL && !curr->is_survivor()) {
 851       gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
 852       ret = false;
 853     }
 854 
 855     if (surv_rate_group == group) {
 856       int age = curr->age_in_surv_rate_group();
 857 
 858       if (age < 0) {
 859         gclog_or_tty->print_cr("## %s: encountered negative age", name);
 860         ret = false;
 861       }
 862 
 863       if (age <= prev_age) {
 864         gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
 865                                "(%d, %d)", name, age, prev_age);
 866         ret = false;
 867       }
 868       prev_age = age;
 869     }
 870   }
 871 
 872   return ret;
 873 }
 874 #endif // PRODUCT
 875 
 876 void G1CollectorPolicy::record_full_collection_start() {
 877   _cur_collection_start_sec = os::elapsedTime();
 878   // Release the future to-space so that it is available for compaction into.
 879   _g1->set_full_collection();
 880 }
 881 
 882 void G1CollectorPolicy::record_full_collection_end() {
 883   // Consider this like a collection pause for the purposes of allocation
 884   // since last pause.
 885   double end_sec = os::elapsedTime();
 886   double full_gc_time_sec = end_sec - _cur_collection_start_sec;
 887   double full_gc_time_ms = full_gc_time_sec * 1000.0;
 888 
 889   _all_full_gc_times_ms->add(full_gc_time_ms);
 890 
 891   update_recent_gc_times(end_sec, full_gc_time_ms);
 892 
 893   _g1->clear_full_collection();
 894 
 895   // "Nuke" the heuristics that control the fully/partially young GC
 896   // transitions and make sure we start with fully young GCs after the
 897   // Full GC.
 898   set_full_young_gcs(true);
 899   _last_full_young_gc = false;
 900   _should_revert_to_full_young_gcs = false;
 901   clear_initiate_conc_mark_if_possible();
 902   clear_during_initial_mark_pause();
 903   _known_garbage_bytes = 0;
 904   _known_garbage_ratio = 0.0;
 905   _in_marking_window = false;
 906   _in_marking_window_im = false;
 907 
 908   _short_lived_surv_rate_group->start_adding_regions();
 909   // also call this on any additional surv rate groups
 910 
 911   record_survivor_regions(0, NULL, NULL);
 912 
 913   _prev_region_num_young   = _region_num_young;
 914   _prev_region_num_tenured = _region_num_tenured;
 915 
 916   _free_regions_at_end_of_collection = _g1->free_regions();
 917   // Reset survivors SurvRateGroup.
 918   _survivor_surv_rate_group->reset();
 919   update_young_list_target_length();
 920 }
 921 
 922 void G1CollectorPolicy::record_stop_world_start() {
 923   _stop_world_start = os::elapsedTime();
 924 }
 925 
 926 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
 927                                                       size_t start_used) {
 928   if (PrintGCDetails) {
 929     gclog_or_tty->stamp(PrintGCTimeStamps);
 930     gclog_or_tty->print("[GC pause");
 931     gclog_or_tty->print(" (%s)", full_young_gcs() ? "young" : "partial");
 932   }
 933 
 934   // We only need to do this here as the policy will only be applied
 935   // to the GC we're about to start. so, no point is calculating this
 936   // every time we calculate / recalculate the target young length.
 937   update_survivors_policy();
 938 
 939   assert(_g1->used() == _g1->recalculate_used(),
 940          err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
 941                  _g1->used(), _g1->recalculate_used()));
 942 
 943   double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
 944   _all_stop_world_times_ms->add(s_w_t_ms);
 945   _stop_world_start = 0.0;
 946 
 947   _cur_collection_start_sec = start_time_sec;
 948   _cur_collection_pause_used_at_start_bytes = start_used;
 949   _cur_collection_pause_used_regions_at_start = _g1->used_regions();
 950   _pending_cards = _g1->pending_card_num();
 951   _max_pending_cards = _g1->max_pending_card_num();
 952 
 953   _bytes_in_collection_set_before_gc = 0;
 954   _bytes_copied_during_gc = 0;
 955 
 956   YoungList* young_list = _g1->young_list();
 957   _eden_bytes_before_gc = young_list->eden_used_bytes();
 958   _survivor_bytes_before_gc = young_list->survivor_used_bytes();
 959   _capacity_before_gc = _g1->capacity();
 960 
 961 #ifdef DEBUG
 962   // initialise these to something well known so that we can spot
 963   // if they are not set properly
 964 
 965   for (int i = 0; i < _parallel_gc_threads; ++i) {
 966     _par_last_gc_worker_start_times_ms[i] = -1234.0;
 967     _par_last_ext_root_scan_times_ms[i] = -1234.0;
 968     _par_last_mark_stack_scan_times_ms[i] = -1234.0;
 969     _par_last_update_rs_times_ms[i] = -1234.0;
 970     _par_last_update_rs_processed_buffers[i] = -1234.0;
 971     _par_last_scan_rs_times_ms[i] = -1234.0;
 972     _par_last_obj_copy_times_ms[i] = -1234.0;
 973     _par_last_termination_times_ms[i] = -1234.0;
 974     _par_last_termination_attempts[i] = -1234.0;
 975     _par_last_gc_worker_end_times_ms[i] = -1234.0;
 976     _par_last_gc_worker_times_ms[i] = -1234.0;
 977   }
 978 #endif
 979 
 980   for (int i = 0; i < _aux_num; ++i) {
 981     _cur_aux_times_ms[i] = 0.0;
 982     _cur_aux_times_set[i] = false;
 983   }
 984 
 985   _satb_drain_time_set = false;
 986   _last_satb_drain_processed_buffers = -1;
 987 
 988   _last_young_gc_full = false;
 989 
 990   // do that for any other surv rate groups
 991   _short_lived_surv_rate_group->stop_adding_regions();
 992   _survivors_age_table.clear();
 993 
 994   assert( verify_young_ages(), "region age verification" );
 995 }
 996 
 997 void G1CollectorPolicy::record_mark_closure_time(double mark_closure_time_ms) {
 998   _mark_closure_time_ms = mark_closure_time_ms;
 999 }
1000 
1001 void G1CollectorPolicy::record_concurrent_mark_init_end(double
1002                                                    mark_init_elapsed_time_ms) {
1003   _during_marking = true;
1004   assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
1005   clear_during_initial_mark_pause();
1006   _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
1007 }
1008 
1009 void G1CollectorPolicy::record_concurrent_mark_remark_start() {
1010   _mark_remark_start_sec = os::elapsedTime();
1011   _during_marking = false;
1012 }
1013 
1014 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
1015   double end_time_sec = os::elapsedTime();
1016   double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
1017   _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
1018   _cur_mark_stop_world_time_ms += elapsed_time_ms;
1019   _prev_collection_pause_end_ms += elapsed_time_ms;
1020 
1021   _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
1022 }
1023 
1024 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
1025   _mark_cleanup_start_sec = os::elapsedTime();
1026 }
1027 
1028 void
1029 G1CollectorPolicy::record_concurrent_mark_cleanup_end(size_t freed_bytes,
1030                                                       size_t max_live_bytes) {
1031   record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes);
1032   record_concurrent_mark_cleanup_end_work2();
1033 }
1034 
1035 void
1036 G1CollectorPolicy::
1037 record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
1038                                          size_t max_live_bytes) {
1039   if (_n_marks < 2) {
1040     _n_marks++;
1041   }
1042 }
1043 
1044 // The important thing about this is that it includes "os::elapsedTime".
1045 void G1CollectorPolicy::record_concurrent_mark_cleanup_end_work2() {
1046   double end_time_sec = os::elapsedTime();
1047   double elapsed_time_ms = (end_time_sec - _mark_cleanup_start_sec)*1000.0;
1048   _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
1049   _cur_mark_stop_world_time_ms += elapsed_time_ms;
1050   _prev_collection_pause_end_ms += elapsed_time_ms;
1051 
1052   _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_time_sec, true);
1053 
1054   _num_markings++;
1055   _n_pauses_at_mark_end = _n_pauses;
1056   _n_marks_since_last_pause++;
1057 }
1058 
1059 void
1060 G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
1061   _should_revert_to_full_young_gcs = false;
1062   _last_full_young_gc = true;
1063   _in_marking_window = false;
1064 }
1065 
1066 void G1CollectorPolicy::record_concurrent_pause() {
1067   if (_stop_world_start > 0.0) {
1068     double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
1069     _all_yield_times_ms->add(yield_ms);
1070   }
1071 }
1072 
1073 void G1CollectorPolicy::record_concurrent_pause_end() {
1074 }
1075 
1076 template<class T>
1077 T sum_of(T* sum_arr, int start, int n, int N) {
1078   T sum = (T)0;
1079   for (int i = 0; i < n; i++) {
1080     int j = (start + i) % N;
1081     sum += sum_arr[j];
1082   }
1083   return sum;
1084 }
1085 
1086 void G1CollectorPolicy::print_par_stats(int level,
1087                                         const char* str,
1088                                         double* data) {
1089   double min = data[0], max = data[0];
1090   double total = 0.0;
1091   LineBuffer buf(level);
1092   buf.append("[%s (ms):", str);
1093   for (uint i = 0; i < ParallelGCThreads; ++i) {
1094     double val = data[i];
1095     if (val < min)
1096       min = val;
1097     if (val > max)
1098       max = val;
1099     total += val;
1100     buf.append("  %3.1lf", val);
1101   }
1102   buf.append_and_print_cr("");
1103   double avg = total / (double) ParallelGCThreads;
1104   buf.append_and_print_cr(" Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf, Diff: %5.1lf]",
1105     avg, min, max, max - min);
1106 }
1107 
1108 void G1CollectorPolicy::print_par_sizes(int level,
1109                                         const char* str,
1110                                         double* data) {
1111   double min = data[0], max = data[0];
1112   double total = 0.0;
1113   LineBuffer buf(level);
1114   buf.append("[%s :", str);
1115   for (uint i = 0; i < ParallelGCThreads; ++i) {
1116     double val = data[i];
1117     if (val < min)
1118       min = val;
1119     if (val > max)
1120       max = val;
1121     total += val;
1122     buf.append(" %d", (int) val);
1123   }
1124   buf.append_and_print_cr("");
1125   double avg = total / (double) ParallelGCThreads;
1126   buf.append_and_print_cr(" Sum: %d, Avg: %d, Min: %d, Max: %d, Diff: %d]",
1127     (int)total, (int)avg, (int)min, (int)max, (int)max - (int)min);
1128 }
1129 
1130 void G1CollectorPolicy::print_stats (int level,
1131                                      const char* str,
1132                                      double value) {
1133   LineBuffer(level).append_and_print_cr("[%s: %5.1lf ms]", str, value);
1134 }
1135 
1136 void G1CollectorPolicy::print_stats (int level,
1137                                      const char* str,
1138                                      int value) {
1139   LineBuffer(level).append_and_print_cr("[%s: %d]", str, value);
1140 }
1141 
1142 double G1CollectorPolicy::avg_value (double* data) {
1143   if (G1CollectedHeap::use_parallel_gc_threads()) {
1144     double ret = 0.0;
1145     for (uint i = 0; i < ParallelGCThreads; ++i)
1146       ret += data[i];
1147     return ret / (double) ParallelGCThreads;
1148   } else {
1149     return data[0];
1150   }
1151 }
1152 
1153 double G1CollectorPolicy::max_value (double* data) {
1154   if (G1CollectedHeap::use_parallel_gc_threads()) {
1155     double ret = data[0];
1156     for (uint i = 1; i < ParallelGCThreads; ++i)
1157       if (data[i] > ret)
1158         ret = data[i];
1159     return ret;
1160   } else {
1161     return data[0];
1162   }
1163 }
1164 
1165 double G1CollectorPolicy::sum_of_values (double* data) {
1166   if (G1CollectedHeap::use_parallel_gc_threads()) {
1167     double sum = 0.0;
1168     for (uint i = 0; i < ParallelGCThreads; i++)
1169       sum += data[i];
1170     return sum;
1171   } else {
1172     return data[0];
1173   }
1174 }
1175 
1176 double G1CollectorPolicy::max_sum (double* data1,
1177                                    double* data2) {
1178   double ret = data1[0] + data2[0];
1179 
1180   if (G1CollectedHeap::use_parallel_gc_threads()) {
1181     for (uint i = 1; i < ParallelGCThreads; ++i) {
1182       double data = data1[i] + data2[i];
1183       if (data > ret)
1184         ret = data;
1185     }
1186   }
1187   return ret;
1188 }
1189 
1190 // Anything below that is considered to be zero
1191 #define MIN_TIMER_GRANULARITY 0.0000001
1192 
1193 void G1CollectorPolicy::record_collection_pause_end() {
1194   double end_time_sec = os::elapsedTime();
1195   double elapsed_ms = _last_pause_time_ms;
1196   bool parallel = G1CollectedHeap::use_parallel_gc_threads();
1197   size_t rs_size =
1198     _cur_collection_pause_used_regions_at_start - collection_set_size();
1199   size_t cur_used_bytes = _g1->used();
1200   assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
1201   bool last_pause_included_initial_mark = false;
1202   bool update_stats = !_g1->evacuation_failed();
1203 
1204 #ifndef PRODUCT
1205   if (G1YoungSurvRateVerbose) {
1206     gclog_or_tty->print_cr("");
1207     _short_lived_surv_rate_group->print();
1208     // do that for any other surv rate groups too
1209   }
1210 #endif // PRODUCT
1211 
1212   last_pause_included_initial_mark = during_initial_mark_pause();
1213   if (last_pause_included_initial_mark)
1214     record_concurrent_mark_init_end(0.0);
1215 
1216   size_t marking_initiating_used_threshold =
1217     (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
1218 
1219   if (!_g1->mark_in_progress() && !_last_full_young_gc) {
1220     assert(!last_pause_included_initial_mark, "invariant");
1221     if (cur_used_bytes > marking_initiating_used_threshold) {
1222       if (cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
1223         assert(!during_initial_mark_pause(), "we should not see this here");
1224 
1225         ergo_verbose3(ErgoConcCycles,
1226                       "request concurrent cycle initiation",
1227                       ergo_format_reason("occupancy higher than threshold")
1228                       ergo_format_byte("occupancy")
1229                       ergo_format_byte_perc("threshold"),
1230                       cur_used_bytes,
1231                       marking_initiating_used_threshold,
1232                       (double) InitiatingHeapOccupancyPercent);
1233 
1234         // Note: this might have already been set, if during the last
1235         // pause we decided to start a cycle but at the beginning of
1236         // this pause we decided to postpone it. That's OK.
1237         set_initiate_conc_mark_if_possible();
1238       } else {
1239         ergo_verbose2(ErgoConcCycles,
1240                   "do not request concurrent cycle initiation",
1241                   ergo_format_reason("occupancy lower than previous occupancy")
1242                   ergo_format_byte("occupancy")
1243                   ergo_format_byte("previous occupancy"),
1244                   cur_used_bytes,
1245                   _prev_collection_pause_used_at_end_bytes);
1246       }
1247     }
1248   }
1249 
1250   _prev_collection_pause_used_at_end_bytes = cur_used_bytes;
1251 
1252   _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
1253                           end_time_sec, false);
1254 
1255   guarantee(_cur_collection_pause_used_regions_at_start >=
1256             collection_set_size(),
1257             "Negative RS size?");
1258 
1259   // This assert is exempted when we're doing parallel collection pauses,
1260   // because the fragmentation caused by the parallel GC allocation buffers
1261   // can lead to more memory being used during collection than was used
1262   // before. Best leave this out until the fragmentation problem is fixed.
1263   // Pauses in which evacuation failed can also lead to negative
1264   // collections, since no space is reclaimed from a region containing an
1265   // object whose evacuation failed.
1266   // Further, we're now always doing parallel collection.  But I'm still
1267   // leaving this here as a placeholder for a more precise assertion later.
1268   // (DLD, 10/05.)
1269   assert((true || parallel) // Always using GC LABs now.
1270          || _g1->evacuation_failed()
1271          || _cur_collection_pause_used_at_start_bytes >= cur_used_bytes,
1272          "Negative collection");
1273 
1274   size_t freed_bytes =
1275     _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
1276   size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;
1277 
1278   double survival_fraction =
1279     (double)surviving_bytes/
1280     (double)_collection_set_bytes_used_before;
1281 
1282   _n_pauses++;
1283 
1284   double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
1285   double mark_stack_scan_time = avg_value(_par_last_mark_stack_scan_times_ms);
1286   double update_rs_time = avg_value(_par_last_update_rs_times_ms);
1287   double update_rs_processed_buffers =
1288     sum_of_values(_par_last_update_rs_processed_buffers);
1289   double scan_rs_time = avg_value(_par_last_scan_rs_times_ms);
1290   double obj_copy_time = avg_value(_par_last_obj_copy_times_ms);
1291   double termination_time = avg_value(_par_last_termination_times_ms);
1292 
1293   double parallel_known_time = update_rs_time +
1294                                ext_root_scan_time +
1295                                mark_stack_scan_time +
1296                                scan_rs_time +
1297                                obj_copy_time +
1298                                termination_time;
1299 
1300   double parallel_other_time = _cur_collection_par_time_ms - parallel_known_time;
1301 
1302   PauseSummary* summary = _summary;
1303 
1304   if (update_stats) {
1305     _recent_rs_scan_times_ms->add(scan_rs_time);
1306     _recent_pause_times_ms->add(elapsed_ms);
1307     _recent_rs_sizes->add(rs_size);
1308 
1309     MainBodySummary* body_summary = summary->main_body_summary();
1310     guarantee(body_summary != NULL, "should not be null!");
1311 
1312     if (_satb_drain_time_set)
1313       body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms);
1314     else
1315       body_summary->record_satb_drain_time_ms(0.0);
1316 
1317     body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
1318     body_summary->record_mark_stack_scan_time_ms(mark_stack_scan_time);
1319     body_summary->record_update_rs_time_ms(update_rs_time);
1320     body_summary->record_scan_rs_time_ms(scan_rs_time);
1321     body_summary->record_obj_copy_time_ms(obj_copy_time);
1322     if (parallel) {
1323       body_summary->record_parallel_time_ms(_cur_collection_par_time_ms);
1324       body_summary->record_clear_ct_time_ms(_cur_clear_ct_time_ms);
1325       body_summary->record_termination_time_ms(termination_time);
1326       body_summary->record_parallel_other_time_ms(parallel_other_time);
1327     }
1328     body_summary->record_mark_closure_time_ms(_mark_closure_time_ms);
1329 
1330     // We exempt parallel collection from this check because Alloc Buffer
1331     // fragmentation can produce negative collections.  Same with evac
1332     // failure.
1333     // Further, we're now always doing parallel collection.  But I'm still
1334     // leaving this here as a placeholder for a more precise assertion later.
1335     // (DLD, 10/05.
1336     assert((true || parallel)
1337            || _g1->evacuation_failed()
1338            || surviving_bytes <= _collection_set_bytes_used_before,
1339            "Or else negative collection!");
1340     _recent_CS_bytes_used_before->add(_collection_set_bytes_used_before);
1341     _recent_CS_bytes_surviving->add(surviving_bytes);
1342 
1343     // this is where we update the allocation rate of the application
1344     double app_time_ms =
1345       (_cur_collection_start_sec * 1000.0 - _prev_collection_pause_end_ms);
1346     if (app_time_ms < MIN_TIMER_GRANULARITY) {
1347       // This usually happens due to the timer not having the required
1348       // granularity. Some Linuxes are the usual culprits.
1349       // We'll just set it to something (arbitrarily) small.
1350       app_time_ms = 1.0;
1351     }
1352     size_t regions_allocated =
1353       (_region_num_young - _prev_region_num_young) +
1354       (_region_num_tenured - _prev_region_num_tenured);
1355     double alloc_rate_ms = (double) regions_allocated / app_time_ms;
1356     _alloc_rate_ms_seq->add(alloc_rate_ms);
1357     _prev_region_num_young   = _region_num_young;
1358     _prev_region_num_tenured = _region_num_tenured;
1359 
1360     double interval_ms =
1361       (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
1362     update_recent_gc_times(end_time_sec, elapsed_ms);
1363     _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
1364     if (recent_avg_pause_time_ratio() < 0.0 ||
1365         (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
1366 #ifndef PRODUCT
1367       // Dump info to allow post-facto debugging
1368       gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
1369       gclog_or_tty->print_cr("-------------------------------------------");
1370       gclog_or_tty->print_cr("Recent GC Times (ms):");
1371       _recent_gc_times_ms->dump();
1372       gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
1373       _recent_prev_end_times_for_all_gcs_sec->dump();
1374       gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
1375                              _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
1376       // In debug mode, terminate the JVM if the user wants to debug at this point.
1377       assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
1378 #endif  // !PRODUCT
1379       // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
1380       // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
1381       if (_recent_avg_pause_time_ratio < 0.0) {
1382         _recent_avg_pause_time_ratio = 0.0;
1383       } else {
1384         assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
1385         _recent_avg_pause_time_ratio = 1.0;
1386       }
1387     }
1388   }
1389 
1390   if (G1PolicyVerbose > 1) {
1391     gclog_or_tty->print_cr("   Recording collection pause(%d)", _n_pauses);
1392   }
1393 
1394   if (G1PolicyVerbose > 1) {
1395     gclog_or_tty->print_cr("      ET: %10.6f ms           (avg: %10.6f ms)\n"
1396                            "       ET-RS:  %10.6f ms      (avg: %10.6f ms)\n"
1397                            "      |RS|: " SIZE_FORMAT,
1398                            elapsed_ms, recent_avg_time_for_pauses_ms(),
1399                            scan_rs_time, recent_avg_time_for_rs_scan_ms(),
1400                            rs_size);
1401 
1402     gclog_or_tty->print_cr("       Used at start: " SIZE_FORMAT"K"
1403                            "       At end " SIZE_FORMAT "K\n"
1404                            "       garbage      : " SIZE_FORMAT "K"
1405                            "       of     " SIZE_FORMAT "K\n"
1406                            "       survival     : %6.2f%%  (%6.2f%% avg)",
1407                            _cur_collection_pause_used_at_start_bytes/K,
1408                            _g1->used()/K, freed_bytes/K,
1409                            _collection_set_bytes_used_before/K,
1410                            survival_fraction*100.0,
1411                            recent_avg_survival_fraction()*100.0);
1412     gclog_or_tty->print_cr("       Recent %% gc pause time: %6.2f",
1413                            recent_avg_pause_time_ratio() * 100.0);
1414   }
1415 
1416   double other_time_ms = elapsed_ms;
1417 
1418   if (_satb_drain_time_set) {
1419     other_time_ms -= _cur_satb_drain_time_ms;
1420   }
1421 
1422   if (parallel) {
1423     other_time_ms -= _cur_collection_par_time_ms + _cur_clear_ct_time_ms;
1424   } else {
1425     other_time_ms -=
1426       update_rs_time +
1427       ext_root_scan_time + mark_stack_scan_time +
1428       scan_rs_time + obj_copy_time;
1429   }
1430 
1431   if (PrintGCDetails) {
1432     gclog_or_tty->print_cr("%s, %1.8lf secs]",
1433                            (last_pause_included_initial_mark) ? " (initial-mark)" : "",
1434                            elapsed_ms / 1000.0);
1435 
1436     if (_satb_drain_time_set) {
1437       print_stats(1, "SATB Drain Time", _cur_satb_drain_time_ms);
1438     }
1439     if (_last_satb_drain_processed_buffers >= 0) {
1440       print_stats(2, "Processed Buffers", _last_satb_drain_processed_buffers);
1441     }
1442     if (parallel) {
1443       print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
1444       print_par_stats(2, "GC Worker Start Time", _par_last_gc_worker_start_times_ms);
1445       print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
1446       print_par_sizes(3, "Processed Buffers", _par_last_update_rs_processed_buffers);
1447       print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
1448       print_par_stats(2, "Mark Stack Scanning", _par_last_mark_stack_scan_times_ms);
1449       print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
1450       print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
1451       print_par_stats(2, "Termination", _par_last_termination_times_ms);
1452       print_par_sizes(3, "Termination Attempts", _par_last_termination_attempts);
1453       print_par_stats(2, "GC Worker End Time", _par_last_gc_worker_end_times_ms);
1454 
1455       for (int i = 0; i < _parallel_gc_threads; i++) {
1456         _par_last_gc_worker_times_ms[i] = _par_last_gc_worker_end_times_ms[i] - _par_last_gc_worker_start_times_ms[i];
1457       }
1458       print_par_stats(2, "GC Worker Times", _par_last_gc_worker_times_ms);
1459 
1460       print_stats(2, "Parallel Other", parallel_other_time);
1461       print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
1462     } else {
1463       print_stats(1, "Update RS", update_rs_time);
1464       print_stats(2, "Processed Buffers",
1465                   (int)update_rs_processed_buffers);
1466       print_stats(1, "Ext Root Scanning", ext_root_scan_time);
1467       print_stats(1, "Mark Stack Scanning", mark_stack_scan_time);
1468       print_stats(1, "Scan RS", scan_rs_time);
1469       print_stats(1, "Object Copying", obj_copy_time);
1470     }
1471 #ifndef PRODUCT
1472     print_stats(1, "Cur Clear CC", _cur_clear_cc_time_ms);
1473     print_stats(1, "Cum Clear CC", _cum_clear_cc_time_ms);
1474     print_stats(1, "Min Clear CC", _min_clear_cc_time_ms);
1475     print_stats(1, "Max Clear CC", _max_clear_cc_time_ms);
1476     if (_num_cc_clears > 0) {
1477       print_stats(1, "Avg Clear CC", _cum_clear_cc_time_ms / ((double)_num_cc_clears));
1478     }
1479 #endif
1480     print_stats(1, "Other", other_time_ms);
1481     print_stats(2, "Choose CSet", _recorded_young_cset_choice_time_ms);
1482 
1483     for (int i = 0; i < _aux_num; ++i) {
1484       if (_cur_aux_times_set[i]) {
1485         char buffer[96];
1486         sprintf(buffer, "Aux%d", i);
1487         print_stats(1, buffer, _cur_aux_times_ms[i]);
1488       }
1489     }
1490   }
1491 
1492   _all_pause_times_ms->add(elapsed_ms);
1493   if (update_stats) {
1494     summary->record_total_time_ms(elapsed_ms);
1495     summary->record_other_time_ms(other_time_ms);
1496   }
1497   for (int i = 0; i < _aux_num; ++i)
1498     if (_cur_aux_times_set[i])
1499       _all_aux_times_ms[i].add(_cur_aux_times_ms[i]);
1500 
1501   // Reset marks-between-pauses counter.
1502   _n_marks_since_last_pause = 0;
1503 
1504   // Update the efficiency-since-mark vars.
1505   double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
1506   if (elapsed_ms < MIN_TIMER_GRANULARITY) {
1507     // This usually happens due to the timer not having the required
1508     // granularity. Some Linuxes are the usual culprits.
1509     // We'll just set it to something (arbitrarily) small.
1510     proc_ms = 1.0;
1511   }
1512   double cur_efficiency = (double) freed_bytes / proc_ms;
1513 
1514   bool new_in_marking_window = _in_marking_window;
1515   bool new_in_marking_window_im = false;
1516   if (during_initial_mark_pause()) {
1517     new_in_marking_window = true;
1518     new_in_marking_window_im = true;
1519   }
1520 
1521   if (_last_full_young_gc) {
1522     ergo_verbose2(ErgoPartiallyYoungGCs,
1523                   "start partially-young GCs",
1524                   ergo_format_byte_perc("known garbage"),
1525                   _known_garbage_bytes, _known_garbage_ratio * 100.0);
1526     set_full_young_gcs(false);
1527     _last_full_young_gc = false;
1528   }
1529 
1530   if ( !_last_young_gc_full ) {
1531     if (_should_revert_to_full_young_gcs) {
1532       ergo_verbose2(ErgoPartiallyYoungGCs,
1533                     "end partially-young GCs",
1534                     ergo_format_reason("partially-young GCs end requested")
1535                     ergo_format_byte_perc("known garbage"),
1536                     _known_garbage_bytes, _known_garbage_ratio * 100.0);
1537       set_full_young_gcs(true);
1538     } else if (_known_garbage_ratio < 0.05) {
1539       ergo_verbose3(ErgoPartiallyYoungGCs,
1540                "end partially-young GCs",
1541                ergo_format_reason("known garbage percent lower than threshold")
1542                ergo_format_byte_perc("known garbage")
1543                ergo_format_perc("threshold"),
1544                _known_garbage_bytes, _known_garbage_ratio * 100.0,
1545                0.05 * 100.0);
1546       set_full_young_gcs(true);
1547     } else if (adaptive_young_list_length() &&
1548               (get_gc_eff_factor() * cur_efficiency < predict_young_gc_eff())) {
1549       ergo_verbose5(ErgoPartiallyYoungGCs,
1550                     "end partially-young GCs",
1551                     ergo_format_reason("current GC efficiency lower than "
1552                                        "predicted fully-young GC efficiency")
1553                     ergo_format_double("GC efficiency factor")
1554                     ergo_format_double("current GC efficiency")
1555                     ergo_format_double("predicted fully-young GC efficiency")
1556                     ergo_format_byte_perc("known garbage"),
1557                     get_gc_eff_factor(), cur_efficiency,
1558                     predict_young_gc_eff(),
1559                     _known_garbage_bytes, _known_garbage_ratio * 100.0);
1560       set_full_young_gcs(true);
1561     }
1562   }
1563   _should_revert_to_full_young_gcs = false;
1564 
1565   if (_last_young_gc_full && !_during_marking) {
1566     _young_gc_eff_seq->add(cur_efficiency);
1567   }
1568 
1569   _short_lived_surv_rate_group->start_adding_regions();
1570   // do that for any other surv rate groupsx
1571 
1572   // <NEW PREDICTION>
1573 
1574   if (update_stats) {
1575     double pause_time_ms = elapsed_ms;
1576 
1577     size_t diff = 0;
1578     if (_max_pending_cards >= _pending_cards)
1579       diff = _max_pending_cards - _pending_cards;
1580     _pending_card_diff_seq->add((double) diff);
1581 
1582     double cost_per_card_ms = 0.0;
1583     if (_pending_cards > 0) {
1584       cost_per_card_ms = update_rs_time / (double) _pending_cards;
1585       _cost_per_card_ms_seq->add(cost_per_card_ms);
1586     }
1587 
1588     size_t cards_scanned = _g1->cards_scanned();
1589 
1590     double cost_per_entry_ms = 0.0;
1591     if (cards_scanned > 10) {
1592       cost_per_entry_ms = scan_rs_time / (double) cards_scanned;
1593       if (_last_young_gc_full)
1594         _cost_per_entry_ms_seq->add(cost_per_entry_ms);
1595       else
1596         _partially_young_cost_per_entry_ms_seq->add(cost_per_entry_ms);
1597     }
1598 
1599     if (_max_rs_lengths > 0) {
1600       double cards_per_entry_ratio =
1601         (double) cards_scanned / (double) _max_rs_lengths;
1602       if (_last_young_gc_full)
1603         _fully_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
1604       else
1605         _partially_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
1606     }
1607 
1608     size_t rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
1609     if (rs_length_diff >= 0)
1610       _rs_length_diff_seq->add((double) rs_length_diff);
1611 
1612     size_t copied_bytes = surviving_bytes;
1613     double cost_per_byte_ms = 0.0;
1614     if (copied_bytes > 0) {
1615       cost_per_byte_ms = obj_copy_time / (double) copied_bytes;
1616       if (_in_marking_window)
1617         _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
1618       else
1619         _cost_per_byte_ms_seq->add(cost_per_byte_ms);
1620     }
1621 
1622     double all_other_time_ms = pause_time_ms -
1623       (update_rs_time + scan_rs_time + obj_copy_time +
1624        _mark_closure_time_ms + termination_time);
1625 
1626     double young_other_time_ms = 0.0;
1627     if (_recorded_young_regions > 0) {
1628       young_other_time_ms =
1629         _recorded_young_cset_choice_time_ms +
1630         _recorded_young_free_cset_time_ms;
1631       _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
1632                                              (double) _recorded_young_regions);
1633     }
1634     double non_young_other_time_ms = 0.0;
1635     if (_recorded_non_young_regions > 0) {
1636       non_young_other_time_ms =
1637         _recorded_non_young_cset_choice_time_ms +
1638         _recorded_non_young_free_cset_time_ms;
1639 
1640       _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
1641                                          (double) _recorded_non_young_regions);
1642     }
1643 
1644     double constant_other_time_ms = all_other_time_ms -
1645       (young_other_time_ms + non_young_other_time_ms);
1646     _constant_other_time_ms_seq->add(constant_other_time_ms);
1647 
1648     double survival_ratio = 0.0;
1649     if (_bytes_in_collection_set_before_gc > 0) {
1650       survival_ratio = (double) _bytes_copied_during_gc /
1651                                    (double) _bytes_in_collection_set_before_gc;
1652     }
1653 
1654     _pending_cards_seq->add((double) _pending_cards);
1655     _scanned_cards_seq->add((double) cards_scanned);
1656     _rs_lengths_seq->add((double) _max_rs_lengths);
1657 
1658     double expensive_region_limit_ms =
1659       (double) MaxGCPauseMillis - predict_constant_other_time_ms();
1660     if (expensive_region_limit_ms < 0.0) {
1661       // this means that the other time was predicted to be longer than
1662       // than the max pause time
1663       expensive_region_limit_ms = (double) MaxGCPauseMillis;
1664     }
1665     _expensive_region_limit_ms = expensive_region_limit_ms;
1666 
1667     if (PREDICTIONS_VERBOSE) {
1668       gclog_or_tty->print_cr("");
1669       gclog_or_tty->print_cr("PREDICTIONS %1.4lf %d "
1670                     "REGIONS %d %d %d "
1671                     "PENDING_CARDS %d %d "
1672                     "CARDS_SCANNED %d %d "
1673                     "RS_LENGTHS %d %d "
1674                     "RS_UPDATE %1.6lf %1.6lf RS_SCAN %1.6lf %1.6lf "
1675                     "SURVIVAL_RATIO %1.6lf %1.6lf "
1676                     "OBJECT_COPY %1.6lf %1.6lf OTHER_CONSTANT %1.6lf %1.6lf "
1677                     "OTHER_YOUNG %1.6lf %1.6lf "
1678                     "OTHER_NON_YOUNG %1.6lf %1.6lf "
1679                     "VTIME_DIFF %1.6lf TERMINATION %1.6lf "
1680                     "ELAPSED %1.6lf %1.6lf ",
1681                     _cur_collection_start_sec,
1682                     (!_last_young_gc_full) ? 2 :
1683                     (last_pause_included_initial_mark) ? 1 : 0,
1684                     _recorded_region_num,
1685                     _recorded_young_regions,
1686                     _recorded_non_young_regions,
1687                     _predicted_pending_cards, _pending_cards,
1688                     _predicted_cards_scanned, cards_scanned,
1689                     _predicted_rs_lengths, _max_rs_lengths,
1690                     _predicted_rs_update_time_ms, update_rs_time,
1691                     _predicted_rs_scan_time_ms, scan_rs_time,
1692                     _predicted_survival_ratio, survival_ratio,
1693                     _predicted_object_copy_time_ms, obj_copy_time,
1694                     _predicted_constant_other_time_ms, constant_other_time_ms,
1695                     _predicted_young_other_time_ms, young_other_time_ms,
1696                     _predicted_non_young_other_time_ms,
1697                     non_young_other_time_ms,
1698                     _vtime_diff_ms, termination_time,
1699                     _predicted_pause_time_ms, elapsed_ms);
1700     }
1701 
1702     if (G1PolicyVerbose > 0) {
1703       gclog_or_tty->print_cr("Pause Time, predicted: %1.4lfms (predicted %s), actual: %1.4lfms",
1704                     _predicted_pause_time_ms,
1705                     (_within_target) ? "within" : "outside",
1706                     elapsed_ms);
1707     }
1708 
1709   }
1710 
1711   _in_marking_window = new_in_marking_window;
1712   _in_marking_window_im = new_in_marking_window_im;
1713   _free_regions_at_end_of_collection = _g1->free_regions();
1714   update_young_list_target_length();
1715 
1716   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
1717   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
1718   adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
1719   // </NEW PREDICTION>
1720 }
1721 
1722 #define EXT_SIZE_FORMAT "%d%s"
1723 #define EXT_SIZE_PARAMS(bytes)                                  \
1724   byte_size_in_proper_unit((bytes)),                            \
1725   proper_unit_for_byte_size((bytes))
1726 
1727 void G1CollectorPolicy::print_heap_transition() {
1728   if (PrintGCDetails) {
1729     YoungList* young_list = _g1->young_list();
1730     size_t eden_bytes = young_list->eden_used_bytes();
1731     size_t survivor_bytes = young_list->survivor_used_bytes();
1732     size_t used_before_gc = _cur_collection_pause_used_at_start_bytes;
1733     size_t used = _g1->used();
1734     size_t capacity = _g1->capacity();
1735     size_t eden_capacity =
1736       (_young_list_target_length * HeapRegion::GrainBytes) - survivor_bytes;
1737 
1738     gclog_or_tty->print_cr(
1739       "   [Eden: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT") "
1740       "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
1741       "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
1742       EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
1743       EXT_SIZE_PARAMS(_eden_bytes_before_gc),
1744       EXT_SIZE_PARAMS(_prev_eden_capacity),
1745       EXT_SIZE_PARAMS(eden_bytes),
1746       EXT_SIZE_PARAMS(eden_capacity),
1747       EXT_SIZE_PARAMS(_survivor_bytes_before_gc),
1748       EXT_SIZE_PARAMS(survivor_bytes),
1749       EXT_SIZE_PARAMS(used_before_gc),
1750       EXT_SIZE_PARAMS(_capacity_before_gc),
1751       EXT_SIZE_PARAMS(used),
1752       EXT_SIZE_PARAMS(capacity));
1753 
1754     _prev_eden_capacity = eden_capacity;
1755   } else if (PrintGC) {
1756     _g1->print_size_transition(gclog_or_tty,
1757                                _cur_collection_pause_used_at_start_bytes,
1758                                _g1->used(), _g1->capacity());
1759   }
1760 }
1761 
1762 // <NEW PREDICTION>
1763 
1764 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
1765                                                      double update_rs_processed_buffers,
1766                                                      double goal_ms) {
1767   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
1768   ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
1769 
1770   if (G1UseAdaptiveConcRefinement) {
1771     const int k_gy = 3, k_gr = 6;
1772     const double inc_k = 1.1, dec_k = 0.9;
1773 
1774     int g = cg1r->green_zone();
1775     if (update_rs_time > goal_ms) {
1776       g = (int)(g * dec_k);  // Can become 0, that's OK. That would mean a mutator-only processing.
1777     } else {
1778       if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
1779         g = (int)MAX2(g * inc_k, g + 1.0);
1780       }
1781     }
1782     // Change the refinement threads params
1783     cg1r->set_green_zone(g);
1784     cg1r->set_yellow_zone(g * k_gy);
1785     cg1r->set_red_zone(g * k_gr);
1786     cg1r->reinitialize_threads();
1787 
1788     int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
1789     int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
1790                                     cg1r->yellow_zone());
1791     // Change the barrier params
1792     dcqs.set_process_completed_threshold(processing_threshold);
1793     dcqs.set_max_completed_queue(cg1r->red_zone());
1794   }
1795 
1796   int curr_queue_size = dcqs.completed_buffers_num();
1797   if (curr_queue_size >= cg1r->yellow_zone()) {
1798     dcqs.set_completed_queue_padding(curr_queue_size);
1799   } else {
1800     dcqs.set_completed_queue_padding(0);
1801   }
1802   dcqs.notify_if_necessary();
1803 }
1804 
1805 double
1806 G1CollectorPolicy::
1807 predict_young_collection_elapsed_time_ms(size_t adjustment) {
1808   guarantee( adjustment == 0 || adjustment == 1, "invariant" );
1809 
1810   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1811   size_t young_num = g1h->young_list()->length();
1812   if (young_num == 0)
1813     return 0.0;
1814 
1815   young_num += adjustment;
1816   size_t pending_cards = predict_pending_cards();
1817   size_t rs_lengths = g1h->young_list()->sampled_rs_lengths() +
1818                       predict_rs_length_diff();
1819   size_t card_num;
1820   if (full_young_gcs())
1821     card_num = predict_young_card_num(rs_lengths);
1822   else
1823     card_num = predict_non_young_card_num(rs_lengths);
1824   size_t young_byte_size = young_num * HeapRegion::GrainBytes;
1825   double accum_yg_surv_rate =
1826     _short_lived_surv_rate_group->accum_surv_rate(adjustment);
1827 
1828   size_t bytes_to_copy =
1829     (size_t) (accum_yg_surv_rate * (double) HeapRegion::GrainBytes);
1830 
1831   return
1832     predict_rs_update_time_ms(pending_cards) +
1833     predict_rs_scan_time_ms(card_num) +
1834     predict_object_copy_time_ms(bytes_to_copy) +
1835     predict_young_other_time_ms(young_num) +
1836     predict_constant_other_time_ms();
1837 }
1838 
1839 double
1840 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
1841   size_t rs_length = predict_rs_length_diff();
1842   size_t card_num;
1843   if (full_young_gcs())
1844     card_num = predict_young_card_num(rs_length);
1845   else
1846     card_num = predict_non_young_card_num(rs_length);
1847   return predict_base_elapsed_time_ms(pending_cards, card_num);
1848 }
1849 
1850 double
1851 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
1852                                                 size_t scanned_cards) {
1853   return
1854     predict_rs_update_time_ms(pending_cards) +
1855     predict_rs_scan_time_ms(scanned_cards) +
1856     predict_constant_other_time_ms();
1857 }
1858 
1859 double
1860 G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
1861                                                   bool young) {
1862   size_t rs_length = hr->rem_set()->occupied();
1863   size_t card_num;
1864   if (full_young_gcs())
1865     card_num = predict_young_card_num(rs_length);
1866   else
1867     card_num = predict_non_young_card_num(rs_length);
1868   size_t bytes_to_copy = predict_bytes_to_copy(hr);
1869 
1870   double region_elapsed_time_ms =
1871     predict_rs_scan_time_ms(card_num) +
1872     predict_object_copy_time_ms(bytes_to_copy);
1873 
1874   if (young)
1875     region_elapsed_time_ms += predict_young_other_time_ms(1);
1876   else
1877     region_elapsed_time_ms += predict_non_young_other_time_ms(1);
1878 
1879   return region_elapsed_time_ms;
1880 }
1881 
1882 size_t
1883 G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
1884   size_t bytes_to_copy;
1885   if (hr->is_marked())
1886     bytes_to_copy = hr->max_live_bytes();
1887   else {
1888     guarantee( hr->is_young() && hr->age_in_surv_rate_group() != -1,
1889                "invariant" );
1890     int age = hr->age_in_surv_rate_group();
1891     double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
1892     bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
1893   }
1894 
1895   return bytes_to_copy;
1896 }
1897 
1898 void
1899 G1CollectorPolicy::start_recording_regions() {
1900   _recorded_rs_lengths            = 0;
1901   _recorded_young_regions         = 0;
1902   _recorded_non_young_regions     = 0;
1903 
1904 #if PREDICTIONS_VERBOSE
1905   _recorded_marked_bytes          = 0;
1906   _recorded_young_bytes           = 0;
1907   _predicted_bytes_to_copy        = 0;
1908   _predicted_rs_lengths           = 0;
1909   _predicted_cards_scanned        = 0;
1910 #endif // PREDICTIONS_VERBOSE
1911 }
1912 
1913 void
1914 G1CollectorPolicy::record_cset_region_info(HeapRegion* hr, bool young) {
1915 #if PREDICTIONS_VERBOSE
1916   if (!young) {
1917     _recorded_marked_bytes += hr->max_live_bytes();
1918   }
1919   _predicted_bytes_to_copy += predict_bytes_to_copy(hr);
1920 #endif // PREDICTIONS_VERBOSE
1921 
1922   size_t rs_length = hr->rem_set()->occupied();
1923   _recorded_rs_lengths += rs_length;
1924 }
1925 
1926 void
1927 G1CollectorPolicy::record_non_young_cset_region(HeapRegion* hr) {
1928   assert(!hr->is_young(), "should not call this");
1929   ++_recorded_non_young_regions;
1930   record_cset_region_info(hr, false);
1931 }
1932 
1933 void
1934 G1CollectorPolicy::set_recorded_young_regions(size_t n_regions) {
1935   _recorded_young_regions = n_regions;
1936 }
1937 
1938 void G1CollectorPolicy::set_recorded_young_bytes(size_t bytes) {
1939 #if PREDICTIONS_VERBOSE
1940   _recorded_young_bytes = bytes;
1941 #endif // PREDICTIONS_VERBOSE
1942 }
1943 
1944 void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
1945   _recorded_rs_lengths = rs_lengths;
1946 }
1947 
1948 void G1CollectorPolicy::set_predicted_bytes_to_copy(size_t bytes) {
1949   _predicted_bytes_to_copy = bytes;
1950 }
1951 
1952 void
1953 G1CollectorPolicy::end_recording_regions() {
1954   // The _predicted_pause_time_ms field is referenced in code
1955   // not under PREDICTIONS_VERBOSE. Let's initialize it.
1956   _predicted_pause_time_ms = -1.0;
1957 
1958 #if PREDICTIONS_VERBOSE
1959   _predicted_pending_cards = predict_pending_cards();
1960   _predicted_rs_lengths = _recorded_rs_lengths + predict_rs_length_diff();
1961   if (full_young_gcs())
1962     _predicted_cards_scanned += predict_young_card_num(_predicted_rs_lengths);
1963   else
1964     _predicted_cards_scanned +=
1965       predict_non_young_card_num(_predicted_rs_lengths);
1966   _recorded_region_num = _recorded_young_regions + _recorded_non_young_regions;
1967 
1968   _predicted_rs_update_time_ms =
1969     predict_rs_update_time_ms(_g1->pending_card_num());
1970   _predicted_rs_scan_time_ms =
1971     predict_rs_scan_time_ms(_predicted_cards_scanned);
1972   _predicted_object_copy_time_ms =
1973     predict_object_copy_time_ms(_predicted_bytes_to_copy);
1974   _predicted_constant_other_time_ms =
1975     predict_constant_other_time_ms();
1976   _predicted_young_other_time_ms =
1977     predict_young_other_time_ms(_recorded_young_regions);
1978   _predicted_non_young_other_time_ms =
1979     predict_non_young_other_time_ms(_recorded_non_young_regions);
1980 
1981   _predicted_pause_time_ms =
1982     _predicted_rs_update_time_ms +
1983     _predicted_rs_scan_time_ms +
1984     _predicted_object_copy_time_ms +
1985     _predicted_constant_other_time_ms +
1986     _predicted_young_other_time_ms +
1987     _predicted_non_young_other_time_ms;
1988 #endif // PREDICTIONS_VERBOSE
1989 }
1990 
1991 void G1CollectorPolicy::check_if_region_is_too_expensive(double
1992                                                            predicted_time_ms) {
1993   // I don't think we need to do this when in young GC mode since
1994   // marking will be initiated next time we hit the soft limit anyway...
1995   if (predicted_time_ms > _expensive_region_limit_ms) {
1996     ergo_verbose2(ErgoPartiallyYoungGCs,
1997               "request partially-young GCs end",
1998               ergo_format_reason("predicted region time higher than threshold")
1999               ergo_format_ms("predicted region time")
2000               ergo_format_ms("threshold"),
2001               predicted_time_ms, _expensive_region_limit_ms);
2002     // no point in doing another partial one
2003     _should_revert_to_full_young_gcs = true;
2004   }
2005 }
2006 
2007 // </NEW PREDICTION>
2008 
2009 
2010 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
2011                                                double elapsed_ms) {
2012   _recent_gc_times_ms->add(elapsed_ms);
2013   _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
2014   _prev_collection_pause_end_ms = end_time_sec * 1000.0;
2015 }
2016 
2017 double G1CollectorPolicy::recent_avg_time_for_pauses_ms() {
2018   if (_recent_pause_times_ms->num() == 0) {
2019     return (double) MaxGCPauseMillis;
2020   }
2021   return _recent_pause_times_ms->avg();
2022 }
2023 
2024 double G1CollectorPolicy::recent_avg_time_for_rs_scan_ms() {
2025   if (_recent_rs_scan_times_ms->num() == 0) {
2026     return (double)MaxGCPauseMillis/3.0;
2027   }
2028   return _recent_rs_scan_times_ms->avg();
2029 }
2030 
2031 int G1CollectorPolicy::number_of_recent_gcs() {
2032   assert(_recent_rs_scan_times_ms->num() ==
2033          _recent_pause_times_ms->num(), "Sequence out of sync");
2034   assert(_recent_pause_times_ms->num() ==
2035          _recent_CS_bytes_used_before->num(), "Sequence out of sync");
2036   assert(_recent_CS_bytes_used_before->num() ==
2037          _recent_CS_bytes_surviving->num(), "Sequence out of sync");
2038 
2039   return _recent_pause_times_ms->num();
2040 }
2041 
2042 double G1CollectorPolicy::recent_avg_survival_fraction() {
2043   return recent_avg_survival_fraction_work(_recent_CS_bytes_surviving,
2044                                            _recent_CS_bytes_used_before);
2045 }
2046 
2047 double G1CollectorPolicy::last_survival_fraction() {
2048   return last_survival_fraction_work(_recent_CS_bytes_surviving,
2049                                      _recent_CS_bytes_used_before);
2050 }
2051 
2052 double
2053 G1CollectorPolicy::recent_avg_survival_fraction_work(TruncatedSeq* surviving,
2054                                                      TruncatedSeq* before) {
2055   assert(surviving->num() == before->num(), "Sequence out of sync");
2056   if (before->sum() > 0.0) {
2057       double recent_survival_rate = surviving->sum() / before->sum();
2058       // We exempt parallel collection from this check because Alloc Buffer
2059       // fragmentation can produce negative collections.
2060       // Further, we're now always doing parallel collection.  But I'm still
2061       // leaving this here as a placeholder for a more precise assertion later.
2062       // (DLD, 10/05.)
2063       assert((true || G1CollectedHeap::use_parallel_gc_threads()) ||
2064              _g1->evacuation_failed() ||
2065              recent_survival_rate <= 1.0, "Or bad frac");
2066       return recent_survival_rate;
2067   } else {
2068     return 1.0; // Be conservative.
2069   }
2070 }
2071 
2072 double
2073 G1CollectorPolicy::last_survival_fraction_work(TruncatedSeq* surviving,
2074                                                TruncatedSeq* before) {
2075   assert(surviving->num() == before->num(), "Sequence out of sync");
2076   if (surviving->num() > 0 && before->last() > 0.0) {
2077     double last_survival_rate = surviving->last() / before->last();
2078     // We exempt parallel collection from this check because Alloc Buffer
2079     // fragmentation can produce negative collections.
2080     // Further, we're now always doing parallel collection.  But I'm still
2081     // leaving this here as a placeholder for a more precise assertion later.
2082     // (DLD, 10/05.)
2083     assert((true || G1CollectedHeap::use_parallel_gc_threads()) ||
2084            last_survival_rate <= 1.0, "Or bad frac");
2085     return last_survival_rate;
2086   } else {
2087     return 1.0;
2088   }
2089 }
2090 
2091 static const int survival_min_obs = 5;
2092 static double survival_min_obs_limits[] = { 0.9, 0.7, 0.5, 0.3, 0.1 };
2093 static const double min_survival_rate = 0.1;
2094 
2095 double
2096 G1CollectorPolicy::conservative_avg_survival_fraction_work(double avg,
2097                                                            double latest) {
2098   double res = avg;
2099   if (number_of_recent_gcs() < survival_min_obs) {
2100     res = MAX2(res, survival_min_obs_limits[number_of_recent_gcs()]);
2101   }
2102   res = MAX2(res, latest);
2103   res = MAX2(res, min_survival_rate);
2104   // In the parallel case, LAB fragmentation can produce "negative
2105   // collections"; so can evac failure.  Cap at 1.0
2106   res = MIN2(res, 1.0);
2107   return res;
2108 }
2109 
2110 size_t G1CollectorPolicy::expansion_amount() {
2111   double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
2112   double threshold = _gc_overhead_perc;
2113   if (recent_gc_overhead > threshold) {
2114     // We will double the existing space, or take
2115     // G1ExpandByPercentOfAvailable % of the available expansion
2116     // space, whichever is smaller, bounded below by a minimum
2117     // expansion (unless that's all that's left.)
2118     const size_t min_expand_bytes = 1*M;
2119     size_t reserved_bytes = _g1->max_capacity();
2120     size_t committed_bytes = _g1->capacity();
2121     size_t uncommitted_bytes = reserved_bytes - committed_bytes;
2122     size_t expand_bytes;
2123     size_t expand_bytes_via_pct =
2124       uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
2125     expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
2126     expand_bytes = MAX2(expand_bytes, min_expand_bytes);
2127     expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
2128 
2129     ergo_verbose5(ErgoHeapSizing,
2130                   "attempt heap expansion",
2131                   ergo_format_reason("recent GC overhead higher than "
2132                                      "threshold after GC")
2133                   ergo_format_perc("recent GC overhead")
2134                   ergo_format_perc("threshold")
2135                   ergo_format_byte("uncommitted")
2136                   ergo_format_byte_perc("calculated expansion amount"),
2137                   recent_gc_overhead, threshold,
2138                   uncommitted_bytes,
2139                   expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);
2140 
2141     return expand_bytes;
2142   } else {
2143     return 0;
2144   }
2145 }
2146 
2147 void G1CollectorPolicy::note_start_of_mark_thread() {
2148   _mark_thread_startup_sec = os::elapsedTime();
2149 }
2150 
2151 class CountCSClosure: public HeapRegionClosure {
2152   G1CollectorPolicy* _g1_policy;
2153 public:
2154   CountCSClosure(G1CollectorPolicy* g1_policy) :
2155     _g1_policy(g1_policy) {}
2156   bool doHeapRegion(HeapRegion* r) {
2157     _g1_policy->_bytes_in_collection_set_before_gc += r->used();
2158     return false;
2159   }
2160 };
2161 
2162 void G1CollectorPolicy::count_CS_bytes_used() {
2163   CountCSClosure cs_closure(this);
2164   _g1->collection_set_iterate(&cs_closure);
2165 }
2166 
2167 void G1CollectorPolicy::print_summary (int level,
2168                                        const char* str,
2169                                        NumberSeq* seq) const {
2170   double sum = seq->sum();
2171   LineBuffer(level + 1).append_and_print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)",
2172                 str, sum / 1000.0, seq->avg());
2173 }
2174 
2175 void G1CollectorPolicy::print_summary_sd (int level,
2176                                           const char* str,
2177                                           NumberSeq* seq) const {
2178   print_summary(level, str, seq);
2179   LineBuffer(level + 6).append_and_print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
2180                 seq->num(), seq->sd(), seq->maximum());
2181 }
2182 
2183 void G1CollectorPolicy::check_other_times(int level,
2184                                         NumberSeq* other_times_ms,
2185                                         NumberSeq* calc_other_times_ms) const {
2186   bool should_print = false;
2187   LineBuffer buf(level + 2);
2188 
2189   double max_sum = MAX2(fabs(other_times_ms->sum()),
2190                         fabs(calc_other_times_ms->sum()));
2191   double min_sum = MIN2(fabs(other_times_ms->sum()),
2192                         fabs(calc_other_times_ms->sum()));
2193   double sum_ratio = max_sum / min_sum;
2194   if (sum_ratio > 1.1) {
2195     should_print = true;
2196     buf.append_and_print_cr("## CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###");
2197   }
2198 
2199   double max_avg = MAX2(fabs(other_times_ms->avg()),
2200                         fabs(calc_other_times_ms->avg()));
2201   double min_avg = MIN2(fabs(other_times_ms->avg()),
2202                         fabs(calc_other_times_ms->avg()));
2203   double avg_ratio = max_avg / min_avg;
2204   if (avg_ratio > 1.1) {
2205     should_print = true;
2206     buf.append_and_print_cr("## CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###");
2207   }
2208 
2209   if (other_times_ms->sum() < -0.01) {
2210     buf.append_and_print_cr("## RECORDED OTHER SUM IS NEGATIVE ###");
2211   }
2212 
2213   if (other_times_ms->avg() < -0.01) {
2214     buf.append_and_print_cr("## RECORDED OTHER AVG IS NEGATIVE ###");
2215   }
2216 
2217   if (calc_other_times_ms->sum() < -0.01) {
2218     should_print = true;
2219     buf.append_and_print_cr("## CALCULATED OTHER SUM IS NEGATIVE ###");
2220   }
2221 
2222   if (calc_other_times_ms->avg() < -0.01) {
2223     should_print = true;
2224     buf.append_and_print_cr("## CALCULATED OTHER AVG IS NEGATIVE ###");
2225   }
2226 
2227   if (should_print)
2228     print_summary(level, "Other(Calc)", calc_other_times_ms);
2229 }
2230 
2231 void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
2232   bool parallel = G1CollectedHeap::use_parallel_gc_threads();
2233   MainBodySummary*    body_summary = summary->main_body_summary();
2234   if (summary->get_total_seq()->num() > 0) {
2235     print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq());
2236     if (body_summary != NULL) {
2237       print_summary(1, "SATB Drain", body_summary->get_satb_drain_seq());
2238       if (parallel) {
2239         print_summary(1, "Parallel Time", body_summary->get_parallel_seq());
2240         print_summary(2, "Update RS", body_summary->get_update_rs_seq());
2241         print_summary(2, "Ext Root Scanning",
2242                       body_summary->get_ext_root_scan_seq());
2243         print_summary(2, "Mark Stack Scanning",
2244                       body_summary->get_mark_stack_scan_seq());
2245         print_summary(2, "Scan RS", body_summary->get_scan_rs_seq());
2246         print_summary(2, "Object Copy", body_summary->get_obj_copy_seq());
2247         print_summary(2, "Termination", body_summary->get_termination_seq());
2248         print_summary(2, "Other", body_summary->get_parallel_other_seq());
2249         {
2250           NumberSeq* other_parts[] = {
2251             body_summary->get_update_rs_seq(),
2252             body_summary->get_ext_root_scan_seq(),
2253             body_summary->get_mark_stack_scan_seq(),
2254             body_summary->get_scan_rs_seq(),
2255             body_summary->get_obj_copy_seq(),
2256             body_summary->get_termination_seq()
2257           };
2258           NumberSeq calc_other_times_ms(body_summary->get_parallel_seq(),
2259                                         6, other_parts);
2260           check_other_times(2, body_summary->get_parallel_other_seq(),
2261                             &calc_other_times_ms);
2262         }
2263         print_summary(1, "Mark Closure", body_summary->get_mark_closure_seq());
2264         print_summary(1, "Clear CT", body_summary->get_clear_ct_seq());
2265       } else {
2266         print_summary(1, "Update RS", body_summary->get_update_rs_seq());
2267         print_summary(1, "Ext Root Scanning",
2268                       body_summary->get_ext_root_scan_seq());
2269         print_summary(1, "Mark Stack Scanning",
2270                       body_summary->get_mark_stack_scan_seq());
2271         print_summary(1, "Scan RS", body_summary->get_scan_rs_seq());
2272         print_summary(1, "Object Copy", body_summary->get_obj_copy_seq());
2273       }
2274     }
2275     print_summary(1, "Other", summary->get_other_seq());
2276     {
2277       if (body_summary != NULL) {
2278         NumberSeq calc_other_times_ms;
2279         if (parallel) {
2280           // parallel
2281           NumberSeq* other_parts[] = {
2282             body_summary->get_satb_drain_seq(),
2283             body_summary->get_parallel_seq(),
2284             body_summary->get_clear_ct_seq()
2285           };
2286           calc_other_times_ms = NumberSeq(summary->get_total_seq(),
2287                                                 3, other_parts);
2288         } else {
2289           // serial
2290           NumberSeq* other_parts[] = {
2291             body_summary->get_satb_drain_seq(),
2292             body_summary->get_update_rs_seq(),
2293             body_summary->get_ext_root_scan_seq(),
2294             body_summary->get_mark_stack_scan_seq(),
2295             body_summary->get_scan_rs_seq(),
2296             body_summary->get_obj_copy_seq()
2297           };
2298           calc_other_times_ms = NumberSeq(summary->get_total_seq(),
2299                                                 6, other_parts);
2300         }
2301         check_other_times(1,  summary->get_other_seq(), &calc_other_times_ms);
2302       }
2303     }
2304   } else {
2305     LineBuffer(1).append_and_print_cr("none");
2306   }
2307   LineBuffer(0).append_and_print_cr("");
2308 }
2309 
2310 void G1CollectorPolicy::print_tracing_info() const {
2311   if (TraceGen0Time) {
2312     gclog_or_tty->print_cr("ALL PAUSES");
2313     print_summary_sd(0, "Total", _all_pause_times_ms);
2314     gclog_or_tty->print_cr("");
2315     gclog_or_tty->print_cr("");
2316     gclog_or_tty->print_cr("   Full Young GC Pauses:    %8d", _full_young_pause_num);
2317     gclog_or_tty->print_cr("   Partial Young GC Pauses: %8d", _partial_young_pause_num);
2318     gclog_or_tty->print_cr("");
2319 
2320     gclog_or_tty->print_cr("EVACUATION PAUSES");
2321     print_summary(_summary);
2322 
2323     gclog_or_tty->print_cr("MISC");
2324     print_summary_sd(0, "Stop World", _all_stop_world_times_ms);
2325     print_summary_sd(0, "Yields", _all_yield_times_ms);
2326     for (int i = 0; i < _aux_num; ++i) {
2327       if (_all_aux_times_ms[i].num() > 0) {
2328         char buffer[96];
2329         sprintf(buffer, "Aux%d", i);
2330         print_summary_sd(0, buffer, &_all_aux_times_ms[i]);
2331       }
2332     }
2333 
2334     size_t all_region_num = _region_num_young + _region_num_tenured;
2335     gclog_or_tty->print_cr("   New Regions %8d, Young %8d (%6.2lf%%), "
2336                "Tenured %8d (%6.2lf%%)",
2337                all_region_num,
2338                _region_num_young,
2339                (double) _region_num_young / (double) all_region_num * 100.0,
2340                _region_num_tenured,
2341                (double) _region_num_tenured / (double) all_region_num * 100.0);
2342   }
2343   if (TraceGen1Time) {
2344     if (_all_full_gc_times_ms->num() > 0) {
2345       gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
2346                  _all_full_gc_times_ms->num(),
2347                  _all_full_gc_times_ms->sum() / 1000.0);
2348       gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times_ms->avg());
2349       gclog_or_tty->print_cr("                     [std. dev = %8.2f ms, max = %8.2f ms]",
2350                     _all_full_gc_times_ms->sd(),
2351                     _all_full_gc_times_ms->maximum());
2352     }
2353   }
2354 }
2355 
2356 void G1CollectorPolicy::print_yg_surv_rate_info() const {
2357 #ifndef PRODUCT
2358   _short_lived_surv_rate_group->print_surv_rate_summary();
2359   // add this call for any other surv rate groups
2360 #endif // PRODUCT
2361 }
2362 
2363 void G1CollectorPolicy::update_region_num(bool young) {
2364   if (young) {
2365     ++_region_num_young;
2366   } else {
2367     ++_region_num_tenured;
2368   }
2369 }
2370 
2371 #ifndef PRODUCT
2372 // for debugging, bit of a hack...
2373 static char*
2374 region_num_to_mbs(int length) {
2375   static char buffer[64];
2376   double bytes = (double) (length * HeapRegion::GrainBytes);
2377   double mbs = bytes / (double) (1024 * 1024);
2378   sprintf(buffer, "%7.2lfMB", mbs);
2379   return buffer;
2380 }
2381 #endif // PRODUCT
2382 
2383 size_t G1CollectorPolicy::max_regions(int purpose) {
2384   switch (purpose) {
2385     case GCAllocForSurvived:
2386       return _max_survivor_regions;
2387     case GCAllocForTenured:
2388       return REGIONS_UNLIMITED;
2389     default:
2390       ShouldNotReachHere();
2391       return REGIONS_UNLIMITED;
2392   };
2393 }
2394 
2395 void G1CollectorPolicy::update_max_gc_locker_expansion() {
2396   size_t expansion_region_num = 0;
2397   if (GCLockerEdenExpansionPercent > 0) {
2398     double perc = (double) GCLockerEdenExpansionPercent / 100.0;
2399     double expansion_region_num_d = perc * (double) _young_list_target_length;
2400     // We use ceiling so that if expansion_region_num_d is > 0.0 (but
2401     // less than 1.0) we'll get 1.
2402     expansion_region_num = (size_t) ceil(expansion_region_num_d);
2403   } else {
2404     assert(expansion_region_num == 0, "sanity");
2405   }
2406   _young_list_max_length = _young_list_target_length + expansion_region_num;
2407   assert(_young_list_target_length <= _young_list_max_length, "post-condition");
2408 }
2409 
2410 // Calculates survivor space parameters.
2411 void G1CollectorPolicy::update_survivors_policy() {
2412   double max_survivor_regions_d =
2413                  (double) _young_list_target_length / (double) SurvivorRatio;
2414   // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
2415   // smaller than 1.0) we'll get 1.
2416   _max_survivor_regions = (size_t) ceil(max_survivor_regions_d);
2417 
2418   _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
2419         HeapRegion::GrainWords * _max_survivor_regions);
2420 }
2421 
2422 #ifndef PRODUCT
2423 class HRSortIndexIsOKClosure: public HeapRegionClosure {
2424   CollectionSetChooser* _chooser;
2425 public:
2426   HRSortIndexIsOKClosure(CollectionSetChooser* chooser) :
2427     _chooser(chooser) {}
2428 
2429   bool doHeapRegion(HeapRegion* r) {
2430     if (!r->continuesHumongous()) {
2431       assert(_chooser->regionProperlyOrdered(r), "Ought to be.");
2432     }
2433     return false;
2434   }
2435 };
2436 
2437 bool G1CollectorPolicy_BestRegionsFirst::assertMarkedBytesDataOK() {
2438   HRSortIndexIsOKClosure cl(_collectionSetChooser);
2439   _g1->heap_region_iterate(&cl);
2440   return true;
2441 }
2442 #endif
2443 
2444 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
2445                                                      GCCause::Cause gc_cause) {
2446   bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
2447   if (!during_cycle) {
2448     ergo_verbose1(ErgoConcCycles,
2449                   "request concurrent cycle initiation",
2450                   ergo_format_reason("requested by GC cause")
2451                   ergo_format_str("GC cause"),
2452                   GCCause::to_string(gc_cause));
2453     set_initiate_conc_mark_if_possible();
2454     return true;
2455   } else {
2456     ergo_verbose1(ErgoConcCycles,
2457                   "do not request concurrent cycle initiation",
2458                   ergo_format_reason("concurrent cycle already in progress")
2459                   ergo_format_str("GC cause"),
2460                   GCCause::to_string(gc_cause));
2461     return false;
2462   }
2463 }
2464 
2465 void
2466 G1CollectorPolicy::decide_on_conc_mark_initiation() {
2467   // We are about to decide on whether this pause will be an
2468   // initial-mark pause.
2469 
2470   // First, during_initial_mark_pause() should not be already set. We
2471   // will set it here if we have to. However, it should be cleared by
2472   // the end of the pause (it's only set for the duration of an
2473   // initial-mark pause).
2474   assert(!during_initial_mark_pause(), "pre-condition");
2475 
2476   if (initiate_conc_mark_if_possible()) {
2477     // We had noticed on a previous pause that the heap occupancy has
2478     // gone over the initiating threshold and we should start a
2479     // concurrent marking cycle. So we might initiate one.
2480 
2481     bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
2482     if (!during_cycle) {
2483       // The concurrent marking thread is not "during a cycle", i.e.,
2484       // it has completed the last one. So we can go ahead and
2485       // initiate a new cycle.
2486 
2487       set_during_initial_mark_pause();
2488 
2489       // And we can now clear initiate_conc_mark_if_possible() as
2490       // we've already acted on it.
2491       clear_initiate_conc_mark_if_possible();
2492 
2493       ergo_verbose0(ErgoConcCycles,
2494                   "initiate concurrent cycle",
2495                   ergo_format_reason("concurrent cycle initiation requested"));
2496     } else {
2497       // The concurrent marking thread is still finishing up the
2498       // previous cycle. If we start one right now the two cycles
2499       // overlap. In particular, the concurrent marking thread might
2500       // be in the process of clearing the next marking bitmap (which
2501       // we will use for the next cycle if we start one). Starting a
2502       // cycle now will be bad given that parts of the marking
2503       // information might get cleared by the marking thread. And we
2504       // cannot wait for the marking thread to finish the cycle as it
2505       // periodically yields while clearing the next marking bitmap
2506       // and, if it's in a yield point, it's waiting for us to
2507       // finish. So, at this point we will not start a cycle and we'll
2508       // let the concurrent marking thread complete the last one.
2509       ergo_verbose0(ErgoConcCycles,
2510                     "do not initiate concurrent cycle",
2511                     ergo_format_reason("concurrent cycle already in progress"));
2512     }
2513   }
2514 }
2515 
2516 void
2517 G1CollectorPolicy_BestRegionsFirst::
2518 record_collection_pause_start(double start_time_sec, size_t start_used) {
2519   G1CollectorPolicy::record_collection_pause_start(start_time_sec, start_used);
2520 }
2521 
2522 class KnownGarbageClosure: public HeapRegionClosure {
2523   CollectionSetChooser* _hrSorted;
2524 
2525 public:
2526   KnownGarbageClosure(CollectionSetChooser* hrSorted) :
2527     _hrSorted(hrSorted)
2528   {}
2529 
2530   bool doHeapRegion(HeapRegion* r) {
2531     // We only include humongous regions in collection
2532     // sets when concurrent mark shows that their contained object is
2533     // unreachable.
2534 
2535     // Do we have any marking information for this region?
2536     if (r->is_marked()) {
2537       // We don't include humongous regions in collection
2538       // sets because we collect them immediately at the end of a marking
2539       // cycle.  We also don't include young regions because we *must*
2540       // include them in the next collection pause.
2541       if (!r->isHumongous() && !r->is_young()) {
2542         _hrSorted->addMarkedHeapRegion(r);
2543       }
2544     }
2545     return false;
2546   }
2547 };
2548 
2549 class ParKnownGarbageHRClosure: public HeapRegionClosure {
2550   CollectionSetChooser* _hrSorted;
2551   jint _marked_regions_added;
2552   jint _chunk_size;
2553   jint _cur_chunk_idx;
2554   jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
2555   int _worker;
2556   int _invokes;
2557 
2558   void get_new_chunk() {
2559     _cur_chunk_idx = _hrSorted->getParMarkedHeapRegionChunk(_chunk_size);
2560     _cur_chunk_end = _cur_chunk_idx + _chunk_size;
2561   }
2562   void add_region(HeapRegion* r) {
2563     if (_cur_chunk_idx == _cur_chunk_end) {
2564       get_new_chunk();
2565     }
2566     assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
2567     _hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r);
2568     _marked_regions_added++;
2569     _cur_chunk_idx++;
2570   }
2571 
2572 public:
2573   ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
2574                            jint chunk_size,
2575                            int worker) :
2576     _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
2577     _marked_regions_added(0), _cur_chunk_idx(0), _cur_chunk_end(0),
2578     _invokes(0)
2579   {}
2580 
2581   bool doHeapRegion(HeapRegion* r) {
2582     // We only include humongous regions in collection
2583     // sets when concurrent mark shows that their contained object is
2584     // unreachable.
2585     _invokes++;
2586 
2587     // Do we have any marking information for this region?
2588     if (r->is_marked()) {
2589       // We don't include humongous regions in collection
2590       // sets because we collect them immediately at the end of a marking
2591       // cycle.
2592       // We also do not include young regions in collection sets
2593       if (!r->isHumongous() && !r->is_young()) {
2594         add_region(r);
2595       }
2596     }
2597     return false;
2598   }
2599   jint marked_regions_added() { return _marked_regions_added; }
2600   int invokes() { return _invokes; }
2601 };
2602 
2603 class ParKnownGarbageTask: public AbstractGangTask {
2604   CollectionSetChooser* _hrSorted;
2605   jint _chunk_size;
2606   G1CollectedHeap* _g1;
2607 public:
2608   ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) :
2609     AbstractGangTask("ParKnownGarbageTask"),
2610     _hrSorted(hrSorted), _chunk_size(chunk_size),
2611     _g1(G1CollectedHeap::heap())
2612   {}
2613 
2614   void work(int i) {
2615     ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size, i);
2616     // Back to zero for the claim value.
2617     _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, i,
2618                                          HeapRegion::InitialClaimValue);
2619     jint regions_added = parKnownGarbageCl.marked_regions_added();
2620     _hrSorted->incNumMarkedHeapRegions(regions_added);
2621     if (G1PrintParCleanupStats) {
2622       gclog_or_tty->print_cr("     Thread %d called %d times, added %d regions to list.",
2623                  i, parKnownGarbageCl.invokes(), regions_added);
2624     }
2625   }
2626 };
2627 
2628 void
2629 G1CollectorPolicy_BestRegionsFirst::
2630 record_concurrent_mark_cleanup_end(size_t freed_bytes,
2631                                    size_t max_live_bytes) {
2632   double start;
2633   if (G1PrintParCleanupStats) start = os::elapsedTime();
2634   record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes);
2635 
2636   _collectionSetChooser->clearMarkedHeapRegions();
2637   double clear_marked_end;
2638   if (G1PrintParCleanupStats) {
2639     clear_marked_end = os::elapsedTime();
2640     gclog_or_tty->print_cr("  clear marked regions + work1: %8.3f ms.",
2641                   (clear_marked_end - start)*1000.0);
2642   }
2643   if (G1CollectedHeap::use_parallel_gc_threads()) {
2644     const size_t OverpartitionFactor = 4;
2645     const size_t MinWorkUnit = 8;
2646     const size_t WorkUnit =
2647       MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor),
2648            MinWorkUnit);
2649     _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
2650                                                              WorkUnit);
2651     ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
2652                                             (int) WorkUnit);
2653     _g1->workers()->run_task(&parKnownGarbageTask);
2654 
2655     assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
2656            "sanity check");
2657   } else {
2658     KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
2659     _g1->heap_region_iterate(&knownGarbagecl);
2660   }
2661   double known_garbage_end;
2662   if (G1PrintParCleanupStats) {
2663     known_garbage_end = os::elapsedTime();
2664     gclog_or_tty->print_cr("  compute known garbage: %8.3f ms.",
2665                   (known_garbage_end - clear_marked_end)*1000.0);
2666   }
2667   _collectionSetChooser->sortMarkedHeapRegions();
2668   double sort_end;
2669   if (G1PrintParCleanupStats) {
2670     sort_end = os::elapsedTime();
2671     gclog_or_tty->print_cr("  sorting: %8.3f ms.",
2672                   (sort_end - known_garbage_end)*1000.0);
2673   }
2674 
2675   record_concurrent_mark_cleanup_end_work2();
2676   double work2_end;
2677   if (G1PrintParCleanupStats) {
2678     work2_end = os::elapsedTime();
2679     gclog_or_tty->print_cr("  work2: %8.3f ms.",
2680                   (work2_end - sort_end)*1000.0);
2681   }
2682 }
2683 
2684 // Add the heap region at the head of the non-incremental collection set
2685 void G1CollectorPolicy::
2686 add_to_collection_set(HeapRegion* hr) {
2687   assert(_inc_cset_build_state == Active, "Precondition");
2688   assert(!hr->is_young(), "non-incremental add of young region");
2689 
2690   if (_g1->mark_in_progress())
2691     _g1->concurrent_mark()->registerCSetRegion(hr);
2692 
2693   assert(!hr->in_collection_set(), "should not already be in the CSet");
2694   hr->set_in_collection_set(true);
2695   hr->set_next_in_collection_set(_collection_set);
2696   _collection_set = hr;
2697   _collection_set_size++;
2698   _collection_set_bytes_used_before += hr->used();
2699   _g1->register_region_with_in_cset_fast_test(hr);
2700 }
2701 
2702 // Initialize the per-collection-set information
2703 void G1CollectorPolicy::start_incremental_cset_building() {
2704   assert(_inc_cset_build_state == Inactive, "Precondition");
2705 
2706   _inc_cset_head = NULL;
2707   _inc_cset_tail = NULL;
2708   _inc_cset_size = 0;
2709   _inc_cset_bytes_used_before = 0;
2710 
2711   _inc_cset_young_index = 0;
2712 
2713   _inc_cset_max_finger = 0;
2714   _inc_cset_recorded_young_bytes = 0;
2715   _inc_cset_recorded_rs_lengths = 0;
2716   _inc_cset_predicted_elapsed_time_ms = 0;
2717   _inc_cset_predicted_bytes_to_copy = 0;
2718   _inc_cset_build_state = Active;
2719 }
2720 
2721 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
2722   // This routine is used when:
2723   // * adding survivor regions to the incremental cset at the end of an
2724   //   evacuation pause,
2725   // * adding the current allocation region to the incremental cset
2726   //   when it is retired, and
2727   // * updating existing policy information for a region in the
2728   //   incremental cset via young list RSet sampling.
2729   // Therefore this routine may be called at a safepoint by the
2730   // VM thread, or in-between safepoints by mutator threads (when
2731   // retiring the current allocation region) or a concurrent
2732   // refine thread (RSet sampling).
2733 
2734   double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
2735   size_t used_bytes = hr->used();
2736 
2737   _inc_cset_recorded_rs_lengths += rs_length;
2738   _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
2739 
2740   _inc_cset_bytes_used_before += used_bytes;
2741 
2742   // Cache the values we have added to the aggregated informtion
2743   // in the heap region in case we have to remove this region from
2744   // the incremental collection set, or it is updated by the
2745   // rset sampling code
2746   hr->set_recorded_rs_length(rs_length);
2747   hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
2748 
2749 #if PREDICTIONS_VERBOSE
2750   size_t bytes_to_copy = predict_bytes_to_copy(hr);
2751   _inc_cset_predicted_bytes_to_copy += bytes_to_copy;
2752 
2753   // Record the number of bytes used in this region
2754   _inc_cset_recorded_young_bytes += used_bytes;
2755 
2756   // Cache the values we have added to the aggregated informtion
2757   // in the heap region in case we have to remove this region from
2758   // the incremental collection set, or it is updated by the
2759   // rset sampling code
2760   hr->set_predicted_bytes_to_copy(bytes_to_copy);
2761 #endif // PREDICTIONS_VERBOSE
2762 }
2763 
2764 void G1CollectorPolicy::remove_from_incremental_cset_info(HeapRegion* hr) {
2765   // This routine is currently only called as part of the updating of
2766   // existing policy information for regions in the incremental cset that
2767   // is performed by the concurrent refine thread(s) as part of young list
2768   // RSet sampling. Therefore we should not be at a safepoint.
2769 
2770   assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
2771   assert(hr->is_young(), "it should be");
2772 
2773   size_t used_bytes = hr->used();
2774   size_t old_rs_length = hr->recorded_rs_length();
2775   double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
2776 
2777   // Subtract the old recorded/predicted policy information for
2778   // the given heap region from the collection set info.
2779   _inc_cset_recorded_rs_lengths -= old_rs_length;
2780   _inc_cset_predicted_elapsed_time_ms -= old_elapsed_time_ms;
2781 
2782   _inc_cset_bytes_used_before -= used_bytes;
2783 
2784   // Clear the values cached in the heap region
2785   hr->set_recorded_rs_length(0);
2786   hr->set_predicted_elapsed_time_ms(0);
2787 
2788 #if PREDICTIONS_VERBOSE
2789   size_t old_predicted_bytes_to_copy = hr->predicted_bytes_to_copy();
2790   _inc_cset_predicted_bytes_to_copy -= old_predicted_bytes_to_copy;
2791 
2792   // Subtract the number of bytes used in this region
2793   _inc_cset_recorded_young_bytes -= used_bytes;
2794 
2795   // Clear the values cached in the heap region
2796   hr->set_predicted_bytes_to_copy(0);
2797 #endif // PREDICTIONS_VERBOSE
2798 }
2799 
2800 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length) {
2801   // Update the collection set information that is dependent on the new RS length
2802   assert(hr->is_young(), "Precondition");
2803 
2804   remove_from_incremental_cset_info(hr);
2805   add_to_incremental_cset_info(hr, new_rs_length);
2806 }
2807 
2808 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
2809   assert( hr->is_young(), "invariant");
2810   assert( hr->young_index_in_cset() == -1, "invariant" );
2811   assert(_inc_cset_build_state == Active, "Precondition");
2812 
2813   // We need to clear and set the cached recorded/cached collection set
2814   // information in the heap region here (before the region gets added
2815   // to the collection set). An individual heap region's cached values
2816   // are calculated, aggregated with the policy collection set info,
2817   // and cached in the heap region here (initially) and (subsequently)
2818   // by the Young List sampling code.
2819 
2820   size_t rs_length = hr->rem_set()->occupied();
2821   add_to_incremental_cset_info(hr, rs_length);
2822 
2823   HeapWord* hr_end = hr->end();
2824   _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);
2825 
2826   assert(!hr->in_collection_set(), "invariant");
2827   hr->set_in_collection_set(true);
2828   assert( hr->next_in_collection_set() == NULL, "invariant");
2829 
2830   _inc_cset_size++;
2831   _g1->register_region_with_in_cset_fast_test(hr);
2832 
2833   hr->set_young_index_in_cset((int) _inc_cset_young_index);
2834   ++_inc_cset_young_index;
2835 }
2836 
2837 // Add the region at the RHS of the incremental cset
2838 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
2839   // We should only ever be appending survivors at the end of a pause
2840   assert( hr->is_survivor(), "Logic");
2841 
2842   // Do the 'common' stuff
2843   add_region_to_incremental_cset_common(hr);
2844 
2845   // Now add the region at the right hand side
2846   if (_inc_cset_tail == NULL) {
2847     assert(_inc_cset_head == NULL, "invariant");
2848     _inc_cset_head = hr;
2849   } else {
2850     _inc_cset_tail->set_next_in_collection_set(hr);
2851   }
2852   _inc_cset_tail = hr;
2853 }
2854 
2855 // Add the region to the LHS of the incremental cset
2856 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
2857   // Survivors should be added to the RHS at the end of a pause
2858   assert(!hr->is_survivor(), "Logic");
2859 
2860   // Do the 'common' stuff
2861   add_region_to_incremental_cset_common(hr);
2862 
2863   // Add the region at the left hand side
2864   hr->set_next_in_collection_set(_inc_cset_head);
2865   if (_inc_cset_head == NULL) {
2866     assert(_inc_cset_tail == NULL, "Invariant");
2867     _inc_cset_tail = hr;
2868   }
2869   _inc_cset_head = hr;
2870 }
2871 
2872 #ifndef PRODUCT
2873 void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
2874   assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");
2875 
2876   st->print_cr("\nCollection_set:");
2877   HeapRegion* csr = list_head;
2878   while (csr != NULL) {
2879     HeapRegion* next = csr->next_in_collection_set();
2880     assert(csr->in_collection_set(), "bad CS");
2881     st->print_cr("  [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
2882                  "age: %4d, y: %d, surv: %d",
2883                         csr->bottom(), csr->end(),
2884                         csr->top(),
2885                         csr->prev_top_at_mark_start(),
2886                         csr->next_top_at_mark_start(),
2887                         csr->top_at_conc_mark_count(),
2888                         csr->age_in_surv_rate_group_cond(),
2889                         csr->is_young(),
2890                         csr->is_survivor());
2891     csr = next;
2892   }
2893 }
2894 #endif // !PRODUCT
2895 
2896 void
2897 G1CollectorPolicy_BestRegionsFirst::choose_collection_set(
2898                                                   double target_pause_time_ms) {
2899   // Set this here - in case we're not doing young collections.
2900   double non_young_start_time_sec = os::elapsedTime();
2901 
2902   YoungList* young_list = _g1->young_list();
2903 
2904   start_recording_regions();
2905 
2906   guarantee(target_pause_time_ms > 0.0,
2907             err_msg("target_pause_time_ms = %1.6lf should be positive",
2908                     target_pause_time_ms));
2909   guarantee(_collection_set == NULL, "Precondition");
2910 
2911   double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
2912   double predicted_pause_time_ms = base_time_ms;
2913 
2914   double time_remaining_ms = target_pause_time_ms - base_time_ms;
2915 
2916   ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
2917                 "start choosing CSet",
2918                 ergo_format_ms("predicted base time")
2919                 ergo_format_ms("remaining time")
2920                 ergo_format_ms("target pause time"),
2921                 base_time_ms, time_remaining_ms, target_pause_time_ms);
2922 
2923   // the 10% and 50% values are arbitrary...
2924   double threshold = 0.10 * target_pause_time_ms;
2925   if (time_remaining_ms < threshold) {
2926     double prev_time_remaining_ms = time_remaining_ms;
2927     time_remaining_ms = 0.50 * target_pause_time_ms;
2928     _within_target = false;
2929     ergo_verbose3(ErgoCSetConstruction,
2930                   "adjust remaining time",
2931                   ergo_format_reason("remaining time lower than threshold")
2932                   ergo_format_ms("remaining time")
2933                   ergo_format_ms("threshold")
2934                   ergo_format_ms("adjusted remaining time"),
2935                   prev_time_remaining_ms, threshold, time_remaining_ms);
2936   } else {
2937     _within_target = true;
2938   }
2939 
2940   size_t expansion_bytes = _g1->expansion_regions() * HeapRegion::GrainBytes;
2941 
2942   HeapRegion* hr;
2943   double young_start_time_sec = os::elapsedTime();
2944 
2945   _collection_set_bytes_used_before = 0;
2946   _collection_set_size = 0;
2947   _young_cset_length  = 0;
2948   _last_young_gc_full = full_young_gcs() ? true : false;
2949 
2950   if (_last_young_gc_full) {
2951     ++_full_young_pause_num;
2952   } else {
2953     ++_partial_young_pause_num;
2954   }
2955 
2956   // The young list is laid with the survivor regions from the previous
2957   // pause are appended to the RHS of the young list, i.e.
2958   //   [Newly Young Regions ++ Survivors from last pause].
2959 
2960   size_t survivor_region_num = young_list->survivor_length();
2961   size_t eden_region_num = young_list->length() - survivor_region_num;
2962   size_t old_region_num = 0;
2963   hr = young_list->first_survivor_region();
2964   while (hr != NULL) {
2965     assert(hr->is_survivor(), "badly formed young list");
2966     hr->set_young();
2967     hr = hr->get_next_young_region();
2968   }
2969 
2970   // Clear the fields that point to the survivor list - they are all young now.
2971   young_list->clear_survivors();
2972 
2973   if (_g1->mark_in_progress())
2974     _g1->concurrent_mark()->register_collection_set_finger(_inc_cset_max_finger);
2975 
2976   _young_cset_length = _inc_cset_young_index;
2977   _collection_set = _inc_cset_head;
2978   _collection_set_size = _inc_cset_size;
2979   _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
2980   time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
2981   predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;
2982 
2983   ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
2984                 "add young regions to CSet",
2985                 ergo_format_region("eden")
2986                 ergo_format_region("survivors")
2987                 ergo_format_ms("predicted young region time"),
2988                 eden_region_num, survivor_region_num,
2989                 _inc_cset_predicted_elapsed_time_ms);
2990 
2991   // The number of recorded young regions is the incremental
2992   // collection set's current size
2993   set_recorded_young_regions(_inc_cset_size);
2994   set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
2995   set_recorded_young_bytes(_inc_cset_recorded_young_bytes);
2996 #if PREDICTIONS_VERBOSE
2997   set_predicted_bytes_to_copy(_inc_cset_predicted_bytes_to_copy);
2998 #endif // PREDICTIONS_VERBOSE
2999 
3000   assert(_inc_cset_size == young_list->length(), "Invariant");
3001 
3002   double young_end_time_sec = os::elapsedTime();
3003   _recorded_young_cset_choice_time_ms =
3004     (young_end_time_sec - young_start_time_sec) * 1000.0;
3005 
3006   // We are doing young collections so reset this.
3007   non_young_start_time_sec = young_end_time_sec;
3008 
3009   if (!full_young_gcs()) {
3010     bool should_continue = true;
3011     NumberSeq seq;
3012     double avg_prediction = 100000000000000000.0; // something very large
3013 
3014     size_t prev_collection_set_size = _collection_set_size;
3015     double prev_predicted_pause_time_ms = predicted_pause_time_ms;
3016     do {
3017       hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms,
3018                                                       avg_prediction);
3019       if (hr != NULL) {
3020         double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
3021         time_remaining_ms -= predicted_time_ms;
3022         predicted_pause_time_ms += predicted_time_ms;
3023         add_to_collection_set(hr);
3024         record_non_young_cset_region(hr);
3025         seq.add(predicted_time_ms);
3026         avg_prediction = seq.avg() + seq.sd();
3027       }
3028 
3029       should_continue = true;
3030       if (hr == NULL) {
3031         // No need for an ergo verbose message here,
3032         // getNextMarkRegion() does this when it returns NULL.
3033         should_continue = false;
3034       } else {
3035         if (adaptive_young_list_length()) {
3036           if (time_remaining_ms < 0.0) {
3037             ergo_verbose1(ErgoCSetConstruction,
3038                           "stop adding old regions to CSet",
3039                           ergo_format_reason("remaining time is lower than 0")
3040                           ergo_format_ms("remaining time"),
3041                           time_remaining_ms);
3042             should_continue = false;
3043           }
3044         } else {
3045           if (_collection_set_size >= _young_list_fixed_length) {
3046             ergo_verbose2(ErgoCSetConstruction,
3047                           "stop adding old regions to CSet",
3048                           ergo_format_reason("CSet length reached target")
3049                           ergo_format_region("CSet")
3050                           ergo_format_region("young target"),
3051                           _collection_set_size, _young_list_fixed_length);
3052             should_continue = false;
3053           }
3054         }
3055       }
3056     } while (should_continue);
3057 
3058     if (!adaptive_young_list_length() &&
3059         _collection_set_size < _young_list_fixed_length) {
3060       ergo_verbose2(ErgoCSetConstruction,
3061                     "request partially-young GCs end",
3062                     ergo_format_reason("CSet length lower than target")
3063                     ergo_format_region("CSet")
3064                     ergo_format_region("young target"),
3065                     _collection_set_size, _young_list_fixed_length);
3066       _should_revert_to_full_young_gcs  = true;
3067     }
3068 
3069     old_region_num = _collection_set_size - prev_collection_set_size;
3070 
3071     ergo_verbose2(ErgoCSetConstruction | ErgoHigh,
3072                   "add old regions to CSet",
3073                   ergo_format_region("old")
3074                   ergo_format_ms("predicted old region time"),
3075                   old_region_num,
3076                   predicted_pause_time_ms - prev_predicted_pause_time_ms);
3077   }
3078 
3079   stop_incremental_cset_building();
3080 
3081   count_CS_bytes_used();
3082 
3083   end_recording_regions();
3084 
3085   ergo_verbose5(ErgoCSetConstruction,
3086                 "finish choosing CSet",
3087                 ergo_format_region("eden")
3088                 ergo_format_region("survivors")
3089                 ergo_format_region("old")
3090                 ergo_format_ms("predicted pause time")
3091                 ergo_format_ms("target pause time"),
3092                 eden_region_num, survivor_region_num, old_region_num,
3093                 predicted_pause_time_ms, target_pause_time_ms);
3094 
3095   double non_young_end_time_sec = os::elapsedTime();
3096   _recorded_non_young_cset_choice_time_ms =
3097     (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
3098 }
3099 
3100 void G1CollectorPolicy_BestRegionsFirst::record_full_collection_end() {
3101   G1CollectorPolicy::record_full_collection_end();
3102   _collectionSetChooser->updateAfterFullCollection();
3103 }
3104 
3105 void G1CollectorPolicy_BestRegionsFirst::
3106 record_collection_pause_end() {
3107   G1CollectorPolicy::record_collection_pause_end();
3108   assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
3109 }