1 /*
   2  * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/g1/concurrentG1Refine.hpp"
  27 #include "gc_implementation/g1/concurrentMark.hpp"
  28 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
  29 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  30 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  31 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
  32 #include "gc_implementation/g1/g1Log.hpp"
  33 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  34 #include "gc_implementation/shared/gcPolicyCounters.hpp"
  35 #include "runtime/arguments.hpp"
  36 #include "runtime/java.hpp"
  37 #include "runtime/mutexLocker.hpp"
  38 #include "utilities/debug.hpp"
  39 
  40 // Different defaults for different number of GC threads
  41 // They were chosen by running GCOld and SPECjbb on debris with different
  42 //   numbers of GC threads and choosing them based on the results
  43 
  44 // all the same
  45 static double rs_length_diff_defaults[] = {
  46   0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
  47 };
  48 
  49 static double cost_per_card_ms_defaults[] = {
  50   0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
  51 };
  52 
  53 // all the same
  54 static double young_cards_per_entry_ratio_defaults[] = {
  55   1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
  56 };
  57 
  58 static double cost_per_entry_ms_defaults[] = {
  59   0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
  60 };
  61 
  62 static double cost_per_byte_ms_defaults[] = {
  63   0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
  64 };
  65 
  66 // these should be pretty consistent
  67 static double constant_other_time_ms_defaults[] = {
  68   5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
  69 };
  70 
  71 
  72 static double young_other_cost_per_region_ms_defaults[] = {
  73   0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
  74 };
  75 
  76 static double non_young_other_cost_per_region_ms_defaults[] = {
  77   1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
  78 };
  79 
  80 // Help class for avoiding interleaved logging
  81 class LineBuffer: public StackObj {
  82 
  83 private:
  84   static const int BUFFER_LEN = 1024;
  85   static const int INDENT_CHARS = 3;
  86   char _buffer[BUFFER_LEN];
  87   int _indent_level;
  88   int _cur;
  89 
  90   void vappend(const char* format, va_list ap) {
  91     int res = vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap);
  92     if (res != -1) {
  93       _cur += res;
  94     } else {
  95       DEBUG_ONLY(warning("buffer too small in LineBuffer");)
  96       _buffer[BUFFER_LEN -1] = 0;
  97       _cur = BUFFER_LEN; // vsnprintf above should not add to _buffer if we are called again
  98     }
  99   }
 100 
 101 public:
 102   explicit LineBuffer(int indent_level): _indent_level(indent_level), _cur(0) {
 103     for (; (_cur < BUFFER_LEN && _cur < (_indent_level * INDENT_CHARS)); _cur++) {
 104       _buffer[_cur] = ' ';
 105     }
 106   }
 107 
 108 #ifndef PRODUCT
 109   ~LineBuffer() {
 110     assert(_cur == _indent_level * INDENT_CHARS, "pending data in buffer - append_and_print_cr() not called?");
 111   }
 112 #endif
 113 
 114   void append(const char* format, ...) {
 115     va_list ap;
 116     va_start(ap, format);
 117     vappend(format, ap);
 118     va_end(ap);
 119   }
 120 
 121   void append_and_print_cr(const char* format, ...) {
 122     va_list ap;
 123     va_start(ap, format);
 124     vappend(format, ap);
 125     va_end(ap);
 126     gclog_or_tty->print_cr("%s", _buffer);
 127     _cur = _indent_level * INDENT_CHARS;
 128   }
 129 };
 130 
 131 G1CollectorPolicy::G1CollectorPolicy() :
 132   _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
 133                         ? ParallelGCThreads : 1),
 134 
 135   _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
 136   _all_pause_times_ms(new NumberSeq()),
 137   _stop_world_start(0.0),
 138   _all_stop_world_times_ms(new NumberSeq()),
 139   _all_yield_times_ms(new NumberSeq()),
 140 
 141   _summary(new Summary()),
 142 
 143   _cur_clear_ct_time_ms(0.0),
 144   _root_region_scan_wait_time_ms(0.0),
 145 
 146   _cur_ref_proc_time_ms(0.0),
 147   _cur_ref_enq_time_ms(0.0),
 148 
 149 #ifndef PRODUCT
 150   _min_clear_cc_time_ms(-1.0),
 151   _max_clear_cc_time_ms(-1.0),
 152   _cur_clear_cc_time_ms(0.0),
 153   _cum_clear_cc_time_ms(0.0),
 154   _num_cc_clears(0L),
 155 #endif
 156 
 157   _aux_num(10),
 158   _all_aux_times_ms(new NumberSeq[_aux_num]),
 159   _cur_aux_start_times_ms(new double[_aux_num]),
 160   _cur_aux_times_ms(new double[_aux_num]),
 161   _cur_aux_times_set(new bool[_aux_num]),
 162 
 163   _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
 164   _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
 165 
 166   _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 167   _prev_collection_pause_end_ms(0.0),
 168   _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
 169   _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
 170   _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 171   _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
 172   _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
 173   _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 174   _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 175   _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 176   _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
 177   _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 178   _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 179   _non_young_other_cost_per_region_ms_seq(
 180                                          new TruncatedSeq(TruncatedSeqLength)),
 181 
 182   _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
 183   _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
 184 
 185   _pause_time_target_ms((double) MaxGCPauseMillis),
 186 
 187   _gcs_are_young(true),
 188   _young_pause_num(0),
 189   _mixed_pause_num(0),
 190 
 191   _during_marking(false),
 192   _in_marking_window(false),
 193   _in_marking_window_im(false),
 194 
 195   _recent_prev_end_times_for_all_gcs_sec(
 196                                 new TruncatedSeq(NumPrevPausesForHeuristics)),
 197 
 198   _recent_avg_pause_time_ratio(0.0),
 199 
 200   _all_full_gc_times_ms(new NumberSeq()),
 201 
 202   _initiate_conc_mark_if_possible(false),
 203   _during_initial_mark_pause(false),
 204   _last_young_gc(false),
 205   _last_gc_was_young(false),
 206 
 207   _eden_bytes_before_gc(0),
 208   _survivor_bytes_before_gc(0),
 209   _capacity_before_gc(0),
 210 
 211   _eden_cset_region_length(0),
 212   _survivor_cset_region_length(0),
 213   _old_cset_region_length(0),
 214 
 215   _collection_set(NULL),
 216   _collection_set_bytes_used_before(0),
 217 
 218   // Incremental CSet attributes
 219   _inc_cset_build_state(Inactive),
 220   _inc_cset_head(NULL),
 221   _inc_cset_tail(NULL),
 222   _inc_cset_bytes_used_before(0),
 223   _inc_cset_max_finger(NULL),
 224   _inc_cset_recorded_rs_lengths(0),
 225   _inc_cset_recorded_rs_lengths_diffs(0),
 226   _inc_cset_predicted_elapsed_time_ms(0.0),
 227   _inc_cset_predicted_elapsed_time_ms_diffs(0.0),
 228 
 229 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
 230 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 231 #endif // _MSC_VER
 232 
 233   _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
 234                                                  G1YoungSurvRateNumRegionsSummary)),
 235   _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
 236                                               G1YoungSurvRateNumRegionsSummary)),
 237   // add here any more surv rate groups
 238   _recorded_survivor_regions(0),
 239   _recorded_survivor_head(NULL),
 240   _recorded_survivor_tail(NULL),
 241   _survivors_age_table(true),
 242 
 243   _gc_overhead_perc(0.0) {
 244 
 245   // Set up the region size and associated fields. Given that the
 246   // policy is created before the heap, we have to set this up here,
 247   // so it's done as soon as possible.
 248   HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
 249   HeapRegionRemSet::setup_remset_size();
 250 
 251   G1ErgoVerbose::initialize();
 252   if (PrintAdaptiveSizePolicy) {
 253     // Currently, we only use a single switch for all the heuristics.
 254     G1ErgoVerbose::set_enabled(true);
 255     // Given that we don't currently have a verboseness level
 256     // parameter, we'll hardcode this to high. This can be easily
 257     // changed in the future.
 258     G1ErgoVerbose::set_level(ErgoHigh);
 259   } else {
 260     G1ErgoVerbose::set_enabled(false);
 261   }
 262 
 263   // Verify PLAB sizes
 264   const size_t region_size = HeapRegion::GrainWords;
 265   if (YoungPLABSize > region_size || OldPLABSize > region_size) {
 266     char buffer[128];
 267     jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most "SIZE_FORMAT,
 268                  OldPLABSize > region_size ? "Old" : "Young", region_size);
 269     vm_exit_during_initialization(buffer);
 270   }
 271 
 272   _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
 273   _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
 274 
 275   _par_last_gc_worker_start_times_ms = new double[_parallel_gc_threads];
 276   _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads];
 277   _par_last_satb_filtering_times_ms = new double[_parallel_gc_threads];
 278 
 279   _par_last_update_rs_times_ms = new double[_parallel_gc_threads];
 280   _par_last_update_rs_processed_buffers = new double[_parallel_gc_threads];
 281 
 282   _par_last_scan_rs_times_ms = new double[_parallel_gc_threads];
 283 
 284   _par_last_obj_copy_times_ms = new double[_parallel_gc_threads];
 285 
 286   _par_last_termination_times_ms = new double[_parallel_gc_threads];
 287   _par_last_termination_attempts = new double[_parallel_gc_threads];
 288   _par_last_gc_worker_end_times_ms = new double[_parallel_gc_threads];
 289   _par_last_gc_worker_times_ms = new double[_parallel_gc_threads];
 290   _par_last_gc_worker_other_times_ms = new double[_parallel_gc_threads];
 291 
 292   int index;
 293   if (ParallelGCThreads == 0)
 294     index = 0;
 295   else if (ParallelGCThreads > 8)
 296     index = 7;
 297   else
 298     index = ParallelGCThreads - 1;
 299 
 300   _pending_card_diff_seq->add(0.0);
 301   _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
 302   _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
 303   _young_cards_per_entry_ratio_seq->add(
 304                                   young_cards_per_entry_ratio_defaults[index]);
 305   _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
 306   _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
 307   _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
 308   _young_other_cost_per_region_ms_seq->add(
 309                                young_other_cost_per_region_ms_defaults[index]);
 310   _non_young_other_cost_per_region_ms_seq->add(
 311                            non_young_other_cost_per_region_ms_defaults[index]);
 312 
 313   // Below, we might need to calculate the pause time target based on
 314   // the pause interval. When we do so we are going to give G1 maximum
 315   // flexibility and allow it to do pauses when it needs to. So, we'll
 316   // arrange that the pause interval to be pause time target + 1 to
 317   // ensure that a) the pause time target is maximized with respect to
 318   // the pause interval and b) we maintain the invariant that pause
 319   // time target < pause interval. If the user does not want this
 320   // maximum flexibility, they will have to set the pause interval
 321   // explicitly.
 322 
 323   // First make sure that, if either parameter is set, its value is
 324   // reasonable.
 325   if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
 326     if (MaxGCPauseMillis < 1) {
 327       vm_exit_during_initialization("MaxGCPauseMillis should be "
 328                                     "greater than 0");
 329     }
 330   }
 331   if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
 332     if (GCPauseIntervalMillis < 1) {
 333       vm_exit_during_initialization("GCPauseIntervalMillis should be "
 334                                     "greater than 0");
 335     }
 336   }
 337 
 338   // Then, if the pause time target parameter was not set, set it to
 339   // the default value.
 340   if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
 341     if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
 342       // The default pause time target in G1 is 200ms
 343       FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
 344     } else {
 345       // We do not allow the pause interval to be set without the
 346       // pause time target
 347       vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
 348                                     "without setting MaxGCPauseMillis");
 349     }
 350   }
 351 
 352   // Then, if the interval parameter was not set, set it according to
 353   // the pause time target (this will also deal with the case when the
 354   // pause time target is the default value).
 355   if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
 356     FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
 357   }
 358 
 359   // Finally, make sure that the two parameters are consistent.
 360   if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
 361     char buffer[256];
 362     jio_snprintf(buffer, 256,
 363                  "MaxGCPauseMillis (%u) should be less than "
 364                  "GCPauseIntervalMillis (%u)",
 365                  MaxGCPauseMillis, GCPauseIntervalMillis);
 366     vm_exit_during_initialization(buffer);
 367   }
 368 
 369   double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
 370   double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
 371   _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
 372   _sigma = (double) G1ConfidencePercent / 100.0;
 373 
 374   // start conservatively (around 50ms is about right)
 375   _concurrent_mark_remark_times_ms->add(0.05);
 376   _concurrent_mark_cleanup_times_ms->add(0.20);
 377   _tenuring_threshold = MaxTenuringThreshold;
 378   // _max_survivor_regions will be calculated by
 379   // update_young_list_target_length() during initialization.
 380   _max_survivor_regions = 0;
 381 
 382   assert(GCTimeRatio > 0,
 383          "we should have set it to a default value set_g1_gc_flags() "
 384          "if a user set it to 0");
 385   _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
 386 
 387   uintx reserve_perc = G1ReservePercent;
 388   // Put an artificial ceiling on this so that it's not set to a silly value.
 389   if (reserve_perc > 50) {
 390     reserve_perc = 50;
 391     warning("G1ReservePercent is set to a value that is too large, "
 392             "it's been updated to %u", reserve_perc);
 393   }
 394   _reserve_factor = (double) reserve_perc / 100.0;
 395   // This will be set when the heap is expanded
 396   // for the first time during initialization.
 397   _reserve_regions = 0;
 398 
 399   initialize_all();
 400   _collectionSetChooser = new CollectionSetChooser();
 401   _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
 402 }
 403 
 404 void G1CollectorPolicy::initialize_flags() {
 405   set_min_alignment(HeapRegion::GrainBytes);
 406   set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
 407   if (SurvivorRatio < 1) {
 408     vm_exit_during_initialization("Invalid survivor ratio specified");
 409   }
 410   CollectorPolicy::initialize_flags();
 411 }
 412 
 413 G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true) {
 414   assert(G1DefaultMinNewGenPercent <= G1DefaultMaxNewGenPercent, "Min larger than max");
 415   assert(G1DefaultMinNewGenPercent > 0 && G1DefaultMinNewGenPercent < 100, "Min out of bounds");
 416   assert(G1DefaultMaxNewGenPercent > 0 && G1DefaultMaxNewGenPercent < 100, "Max out of bounds");
 417 
 418   if (FLAG_IS_CMDLINE(NewRatio)) {
 419     if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
 420       warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
 421     } else {
 422       _sizer_kind = SizerNewRatio;
 423       _adaptive_size = false;
 424       return;
 425     }
 426   }
 427 
 428   if (FLAG_IS_CMDLINE(NewSize)) {
 429     _min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes),
 430                                      1U);
 431     if (FLAG_IS_CMDLINE(MaxNewSize)) {
 432       _max_desired_young_length =
 433                              MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
 434                                   1U);
 435       _sizer_kind = SizerMaxAndNewSize;
 436       _adaptive_size = _min_desired_young_length == _max_desired_young_length;
 437     } else {
 438       _sizer_kind = SizerNewSizeOnly;
 439     }
 440   } else if (FLAG_IS_CMDLINE(MaxNewSize)) {
 441     _max_desired_young_length =
 442                              MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
 443                                   1U);
 444     _sizer_kind = SizerMaxNewSizeOnly;
 445   }
 446 }
 447 
 448 uint G1YoungGenSizer::calculate_default_min_length(uint new_number_of_heap_regions) {
 449   uint default_value = (new_number_of_heap_regions * G1DefaultMinNewGenPercent) / 100;
 450   return MAX2(1U, default_value);
 451 }
 452 
 453 uint G1YoungGenSizer::calculate_default_max_length(uint new_number_of_heap_regions) {
 454   uint default_value = (new_number_of_heap_regions * G1DefaultMaxNewGenPercent) / 100;
 455   return MAX2(1U, default_value);
 456 }
 457 
 458 void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) {
 459   assert(new_number_of_heap_regions > 0, "Heap must be initialized");
 460 
 461   switch (_sizer_kind) {
 462     case SizerDefaults:
 463       _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions);
 464       _max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions);
 465       break;
 466     case SizerNewSizeOnly:
 467       _max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions);
 468       _max_desired_young_length = MAX2(_min_desired_young_length, _max_desired_young_length);
 469       break;
 470     case SizerMaxNewSizeOnly:
 471       _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions);
 472       _min_desired_young_length = MIN2(_min_desired_young_length, _max_desired_young_length);
 473       break;
 474     case SizerMaxAndNewSize:
 475       // Do nothing. Values set on the command line, don't update them at runtime.
 476       break;
 477     case SizerNewRatio:
 478       _min_desired_young_length = new_number_of_heap_regions / (NewRatio + 1);
 479       _max_desired_young_length = _min_desired_young_length;
 480       break;
 481     default:
 482       ShouldNotReachHere();
 483   }
 484 
 485   assert(_min_desired_young_length <= _max_desired_young_length, "Invalid min/max young gen size values");
 486 }
 487 
 488 void G1CollectorPolicy::init() {
 489   // Set aside an initial future to_space.
 490   _g1 = G1CollectedHeap::heap();
 491 
 492   assert(Heap_lock->owned_by_self(), "Locking discipline.");
 493 
 494   initialize_gc_policy_counters();
 495 
 496   if (adaptive_young_list_length()) {
 497     _young_list_fixed_length = 0;
 498   } else {
 499     _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
 500   }
 501   _free_regions_at_end_of_collection = _g1->free_regions();
 502   update_young_list_target_length();
 503   _prev_eden_capacity = _young_list_target_length * HeapRegion::GrainBytes;
 504 
 505   // We may immediately start allocating regions and placing them on the
 506   // collection set list. Initialize the per-collection set info
 507   start_incremental_cset_building();
 508 }
 509 
 510 // Create the jstat counters for the policy.
 511 void G1CollectorPolicy::initialize_gc_policy_counters() {
 512   _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
 513 }
 514 
 515 bool G1CollectorPolicy::predict_will_fit(uint young_length,
 516                                          double base_time_ms,
 517                                          uint base_free_regions,
 518                                          double target_pause_time_ms) {
 519   if (young_length >= base_free_regions) {
 520     // end condition 1: not enough space for the young regions
 521     return false;
 522   }
 523 
 524   double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
 525   size_t bytes_to_copy =
 526                (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
 527   double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
 528   double young_other_time_ms = predict_young_other_time_ms(young_length);
 529   double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
 530   if (pause_time_ms > target_pause_time_ms) {
 531     // end condition 2: prediction is over the target pause time
 532     return false;
 533   }
 534 
 535   size_t free_bytes =
 536                    (base_free_regions - young_length) * HeapRegion::GrainBytes;
 537   if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {
 538     // end condition 3: out-of-space (conservatively!)
 539     return false;
 540   }
 541 
 542   // success!
 543   return true;
 544 }
 545 
 546 void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) {
 547   // re-calculate the necessary reserve
 548   double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
 549   // We use ceiling so that if reserve_regions_d is > 0.0 (but
 550   // smaller than 1.0) we'll get 1.
 551   _reserve_regions = (uint) ceil(reserve_regions_d);
 552 
 553   _young_gen_sizer->heap_size_changed(new_number_of_regions);
 554 }
 555 
 556 uint G1CollectorPolicy::calculate_young_list_desired_min_length(
 557                                                        uint base_min_length) {
 558   uint desired_min_length = 0;
 559   if (adaptive_young_list_length()) {
 560     if (_alloc_rate_ms_seq->num() > 3) {
 561       double now_sec = os::elapsedTime();
 562       double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
 563       double alloc_rate_ms = predict_alloc_rate_ms();
 564       desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
 565     } else {
 566       // otherwise we don't have enough info to make the prediction
 567     }
 568   }
 569   desired_min_length += base_min_length;
 570   // make sure we don't go below any user-defined minimum bound
 571   return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
 572 }
 573 
 574 uint G1CollectorPolicy::calculate_young_list_desired_max_length() {
 575   // Here, we might want to also take into account any additional
 576   // constraints (i.e., user-defined minimum bound). Currently, we
 577   // effectively don't set this bound.
 578   return _young_gen_sizer->max_desired_young_length();
 579 }
 580 
 581 void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
 582   if (rs_lengths == (size_t) -1) {
 583     // if it's set to the default value (-1), we should predict it;
 584     // otherwise, use the given value.
 585     rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
 586   }
 587 
 588   // Calculate the absolute and desired min bounds.
 589 
 590   // This is how many young regions we already have (currently: the survivors).
 591   uint base_min_length = recorded_survivor_regions();
 592   // This is the absolute minimum young length, which ensures that we
 593   // can allocate one eden region in the worst-case.
 594   uint absolute_min_length = base_min_length + 1;
 595   uint desired_min_length =
 596                      calculate_young_list_desired_min_length(base_min_length);
 597   if (desired_min_length < absolute_min_length) {
 598     desired_min_length = absolute_min_length;
 599   }
 600 
 601   // Calculate the absolute and desired max bounds.
 602 
 603   // We will try our best not to "eat" into the reserve.
 604   uint absolute_max_length = 0;
 605   if (_free_regions_at_end_of_collection > _reserve_regions) {
 606     absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
 607   }
 608   uint desired_max_length = calculate_young_list_desired_max_length();
 609   if (desired_max_length > absolute_max_length) {
 610     desired_max_length = absolute_max_length;
 611   }
 612 
 613   uint young_list_target_length = 0;
 614   if (adaptive_young_list_length()) {
 615     if (gcs_are_young()) {
 616       young_list_target_length =
 617                         calculate_young_list_target_length(rs_lengths,
 618                                                            base_min_length,
 619                                                            desired_min_length,
 620                                                            desired_max_length);
 621       _rs_lengths_prediction = rs_lengths;
 622     } else {
 623       // Don't calculate anything and let the code below bound it to
 624       // the desired_min_length, i.e., do the next GC as soon as
 625       // possible to maximize how many old regions we can add to it.
 626     }
 627   } else {
 628     // The user asked for a fixed young gen so we'll fix the young gen
 629     // whether the next GC is young or mixed.
 630     young_list_target_length = _young_list_fixed_length;
 631   }
 632 
 633   // Make sure we don't go over the desired max length, nor under the
 634   // desired min length. In case they clash, desired_min_length wins
 635   // which is why that test is second.
 636   if (young_list_target_length > desired_max_length) {
 637     young_list_target_length = desired_max_length;
 638   }
 639   if (young_list_target_length < desired_min_length) {
 640     young_list_target_length = desired_min_length;
 641   }
 642 
 643   assert(young_list_target_length > recorded_survivor_regions(),
 644          "we should be able to allocate at least one eden region");
 645   assert(young_list_target_length >= absolute_min_length, "post-condition");
 646   _young_list_target_length = young_list_target_length;
 647 
 648   update_max_gc_locker_expansion();
 649 }
 650 
 651 uint
 652 G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
 653                                                      uint base_min_length,
 654                                                      uint desired_min_length,
 655                                                      uint desired_max_length) {
 656   assert(adaptive_young_list_length(), "pre-condition");
 657   assert(gcs_are_young(), "only call this for young GCs");
 658 
 659   // In case some edge-condition makes the desired max length too small...
 660   if (desired_max_length <= desired_min_length) {
 661     return desired_min_length;
 662   }
 663 
 664   // We'll adjust min_young_length and max_young_length not to include
 665   // the already allocated young regions (i.e., so they reflect the
 666   // min and max eden regions we'll allocate). The base_min_length
 667   // will be reflected in the predictions by the
 668   // survivor_regions_evac_time prediction.
 669   assert(desired_min_length > base_min_length, "invariant");
 670   uint min_young_length = desired_min_length - base_min_length;
 671   assert(desired_max_length > base_min_length, "invariant");
 672   uint max_young_length = desired_max_length - base_min_length;
 673 
 674   double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
 675   double survivor_regions_evac_time = predict_survivor_regions_evac_time();
 676   size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
 677   size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
 678   size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
 679   double base_time_ms =
 680     predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
 681     survivor_regions_evac_time;
 682   uint available_free_regions = _free_regions_at_end_of_collection;
 683   uint base_free_regions = 0;
 684   if (available_free_regions > _reserve_regions) {
 685     base_free_regions = available_free_regions - _reserve_regions;
 686   }
 687 
 688   // Here, we will make sure that the shortest young length that
 689   // makes sense fits within the target pause time.
 690 
 691   if (predict_will_fit(min_young_length, base_time_ms,
 692                        base_free_regions, target_pause_time_ms)) {
 693     // The shortest young length will fit into the target pause time;
 694     // we'll now check whether the absolute maximum number of young
 695     // regions will fit in the target pause time. If not, we'll do
 696     // a binary search between min_young_length and max_young_length.
 697     if (predict_will_fit(max_young_length, base_time_ms,
 698                          base_free_regions, target_pause_time_ms)) {
 699       // The maximum young length will fit into the target pause time.
 700       // We are done so set min young length to the maximum length (as
 701       // the result is assumed to be returned in min_young_length).
 702       min_young_length = max_young_length;
 703     } else {
 704       // The maximum possible number of young regions will not fit within
 705       // the target pause time so we'll search for the optimal
 706       // length. The loop invariants are:
 707       //
 708       // min_young_length < max_young_length
 709       // min_young_length is known to fit into the target pause time
 710       // max_young_length is known not to fit into the target pause time
 711       //
 712       // Going into the loop we know the above hold as we've just
 713       // checked them. Every time around the loop we check whether
 714       // the middle value between min_young_length and
 715       // max_young_length fits into the target pause time. If it
 716       // does, it becomes the new min. If it doesn't, it becomes
 717       // the new max. This way we maintain the loop invariants.
 718 
 719       assert(min_young_length < max_young_length, "invariant");
 720       uint diff = (max_young_length - min_young_length) / 2;
 721       while (diff > 0) {
 722         uint young_length = min_young_length + diff;
 723         if (predict_will_fit(young_length, base_time_ms,
 724                              base_free_regions, target_pause_time_ms)) {
 725           min_young_length = young_length;
 726         } else {
 727           max_young_length = young_length;
 728         }
 729         assert(min_young_length <  max_young_length, "invariant");
 730         diff = (max_young_length - min_young_length) / 2;
 731       }
 732       // The results is min_young_length which, according to the
 733       // loop invariants, should fit within the target pause time.
 734 
 735       // These are the post-conditions of the binary search above:
 736       assert(min_young_length < max_young_length,
 737              "otherwise we should have discovered that max_young_length "
 738              "fits into the pause target and not done the binary search");
 739       assert(predict_will_fit(min_young_length, base_time_ms,
 740                               base_free_regions, target_pause_time_ms),
 741              "min_young_length, the result of the binary search, should "
 742              "fit into the pause target");
 743       assert(!predict_will_fit(min_young_length + 1, base_time_ms,
 744                                base_free_regions, target_pause_time_ms),
 745              "min_young_length, the result of the binary search, should be "
 746              "optimal, so no larger length should fit into the pause target");
 747     }
 748   } else {
 749     // Even the minimum length doesn't fit into the pause time
 750     // target, return it as the result nevertheless.
 751   }
 752   return base_min_length + min_young_length;
 753 }
 754 
 755 double G1CollectorPolicy::predict_survivor_regions_evac_time() {
 756   double survivor_regions_evac_time = 0.0;
 757   for (HeapRegion * r = _recorded_survivor_head;
 758        r != NULL && r != _recorded_survivor_tail->get_next_young_region();
 759        r = r->get_next_young_region()) {
 760     survivor_regions_evac_time += predict_region_elapsed_time_ms(r, true);
 761   }
 762   return survivor_regions_evac_time;
 763 }
 764 
 765 void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
 766   guarantee( adaptive_young_list_length(), "should not call this otherwise" );
 767 
 768   size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
 769   if (rs_lengths > _rs_lengths_prediction) {
 770     // add 10% to avoid having to recalculate often
 771     size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
 772     update_young_list_target_length(rs_lengths_prediction);
 773   }
 774 }
 775 
 776 
 777 
 778 HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
 779                                                bool is_tlab,
 780                                                bool* gc_overhead_limit_was_exceeded) {
 781   guarantee(false, "Not using this policy feature yet.");
 782   return NULL;
 783 }
 784 
 785 // This method controls how a collector handles one or more
 786 // of its generations being fully allocated.
 787 HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
 788                                                        bool is_tlab) {
 789   guarantee(false, "Not using this policy feature yet.");
 790   return NULL;
 791 }
 792 
 793 
 794 #ifndef PRODUCT
 795 bool G1CollectorPolicy::verify_young_ages() {
 796   HeapRegion* head = _g1->young_list()->first_region();
 797   return
 798     verify_young_ages(head, _short_lived_surv_rate_group);
 799   // also call verify_young_ages on any additional surv rate groups
 800 }
 801 
 802 bool
 803 G1CollectorPolicy::verify_young_ages(HeapRegion* head,
 804                                      SurvRateGroup *surv_rate_group) {
 805   guarantee( surv_rate_group != NULL, "pre-condition" );
 806 
 807   const char* name = surv_rate_group->name();
 808   bool ret = true;
 809   int prev_age = -1;
 810 
 811   for (HeapRegion* curr = head;
 812        curr != NULL;
 813        curr = curr->get_next_young_region()) {
 814     SurvRateGroup* group = curr->surv_rate_group();
 815     if (group == NULL && !curr->is_survivor()) {
 816       gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
 817       ret = false;
 818     }
 819 
 820     if (surv_rate_group == group) {
 821       int age = curr->age_in_surv_rate_group();
 822 
 823       if (age < 0) {
 824         gclog_or_tty->print_cr("## %s: encountered negative age", name);
 825         ret = false;
 826       }
 827 
 828       if (age <= prev_age) {
 829         gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
 830                                "(%d, %d)", name, age, prev_age);
 831         ret = false;
 832       }
 833       prev_age = age;
 834     }
 835   }
 836 
 837   return ret;
 838 }
 839 #endif // PRODUCT
 840 
 841 void G1CollectorPolicy::record_full_collection_start() {
 842   _cur_collection_start_sec = os::elapsedTime();
 843   // Release the future to-space so that it is available for compaction into.
 844   _g1->set_full_collection();
 845 }
 846 
 847 void G1CollectorPolicy::record_full_collection_end() {
 848   // Consider this like a collection pause for the purposes of allocation
 849   // since last pause.
 850   double end_sec = os::elapsedTime();
 851   double full_gc_time_sec = end_sec - _cur_collection_start_sec;
 852   double full_gc_time_ms = full_gc_time_sec * 1000.0;
 853 
 854   _all_full_gc_times_ms->add(full_gc_time_ms);
 855 
 856   update_recent_gc_times(end_sec, full_gc_time_ms);
 857 
 858   _g1->clear_full_collection();
 859 
 860   // "Nuke" the heuristics that control the young/mixed GC
 861   // transitions and make sure we start with young GCs after the Full GC.
 862   set_gcs_are_young(true);
 863   _last_young_gc = false;
 864   clear_initiate_conc_mark_if_possible();
 865   clear_during_initial_mark_pause();
 866   _in_marking_window = false;
 867   _in_marking_window_im = false;
 868 
 869   _short_lived_surv_rate_group->start_adding_regions();
 870   // also call this on any additional surv rate groups
 871 
 872   record_survivor_regions(0, NULL, NULL);
 873 
 874   _free_regions_at_end_of_collection = _g1->free_regions();
 875   // Reset survivors SurvRateGroup.
 876   _survivor_surv_rate_group->reset();
 877   update_young_list_target_length();
 878   _collectionSetChooser->clear();
 879 }
 880 
 881 void G1CollectorPolicy::record_stop_world_start() {
 882   _stop_world_start = os::elapsedTime();
 883 }
 884 
 885 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
 886                                                       size_t start_used) {
 887   if (G1Log::finer()) {
 888     gclog_or_tty->stamp(PrintGCTimeStamps);
 889     gclog_or_tty->print("[GC pause");
 890     gclog_or_tty->print(" (%s)", gcs_are_young() ? "young" : "mixed");
 891   }
 892 
 893   // We only need to do this here as the policy will only be applied
 894   // to the GC we're about to start. so, no point is calculating this
 895   // every time we calculate / recalculate the target young length.
 896   update_survivors_policy();
 897 
 898   assert(_g1->used() == _g1->recalculate_used(),
 899          err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
 900                  _g1->used(), _g1->recalculate_used()));
 901 
 902   double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
 903   _all_stop_world_times_ms->add(s_w_t_ms);
 904   _stop_world_start = 0.0;
 905 
 906   _cur_collection_start_sec = start_time_sec;
 907   _cur_collection_pause_used_at_start_bytes = start_used;
 908   _cur_collection_pause_used_regions_at_start = _g1->used_regions();
 909   _pending_cards = _g1->pending_card_num();
 910   _max_pending_cards = _g1->max_pending_card_num();
 911 
 912   _bytes_in_collection_set_before_gc = 0;
 913   _bytes_copied_during_gc = 0;
 914 
 915   YoungList* young_list = _g1->young_list();
 916   _eden_bytes_before_gc = young_list->eden_used_bytes();
 917   _survivor_bytes_before_gc = young_list->survivor_used_bytes();
 918   _capacity_before_gc = _g1->capacity();
 919 
 920 #ifdef DEBUG
 921   // initialise these to something well known so that we can spot
 922   // if they are not set properly
 923 
 924   for (int i = 0; i < _parallel_gc_threads; ++i) {
 925     _par_last_gc_worker_start_times_ms[i] = -1234.0;
 926     _par_last_ext_root_scan_times_ms[i] = -1234.0;
 927     _par_last_satb_filtering_times_ms[i] = -1234.0;
 928     _par_last_update_rs_times_ms[i] = -1234.0;
 929     _par_last_update_rs_processed_buffers[i] = -1234.0;
 930     _par_last_scan_rs_times_ms[i] = -1234.0;
 931     _par_last_obj_copy_times_ms[i] = -1234.0;
 932     _par_last_termination_times_ms[i] = -1234.0;
 933     _par_last_termination_attempts[i] = -1234.0;
 934     _par_last_gc_worker_end_times_ms[i] = -1234.0;
 935     _par_last_gc_worker_times_ms[i] = -1234.0;
 936     _par_last_gc_worker_other_times_ms[i] = -1234.0;
 937   }
 938 #endif
 939 
 940   for (int i = 0; i < _aux_num; ++i) {
 941     _cur_aux_times_ms[i] = 0.0;
 942     _cur_aux_times_set[i] = false;
 943   }
 944 
 945   // This is initialized to zero here and is set during the evacuation
 946   // pause if we actually waited for the root region scanning to finish.
 947   _root_region_scan_wait_time_ms = 0.0;
 948 
 949   _last_gc_was_young = false;
 950 
 951   // do that for any other surv rate groups
 952   _short_lived_surv_rate_group->stop_adding_regions();
 953   _survivors_age_table.clear();
 954 
 955   assert( verify_young_ages(), "region age verification" );
 956 }
 957 
 958 void G1CollectorPolicy::record_concurrent_mark_init_end(double
 959                                                    mark_init_elapsed_time_ms) {
 960   _during_marking = true;
 961   assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
 962   clear_during_initial_mark_pause();
 963   _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
 964 }
 965 
 966 void G1CollectorPolicy::record_concurrent_mark_remark_start() {
 967   _mark_remark_start_sec = os::elapsedTime();
 968   _during_marking = false;
 969 }
 970 
 971 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
 972   double end_time_sec = os::elapsedTime();
 973   double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
 974   _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
 975   _cur_mark_stop_world_time_ms += elapsed_time_ms;
 976   _prev_collection_pause_end_ms += elapsed_time_ms;
 977 
 978   _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
 979 }
 980 
 981 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
 982   _mark_cleanup_start_sec = os::elapsedTime();
 983 }
 984 
 985 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
 986   _last_young_gc = true;
 987   _in_marking_window = false;
 988 }
 989 
 990 void G1CollectorPolicy::record_concurrent_pause() {
 991   if (_stop_world_start > 0.0) {
 992     double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
 993     _all_yield_times_ms->add(yield_ms);
 994   }
 995 }
 996 
 997 void G1CollectorPolicy::record_concurrent_pause_end() {
 998 }
 999 
1000 template<class T>
1001 T sum_of(T* sum_arr, int start, int n, int N) {
1002   T sum = (T)0;
1003   for (int i = 0; i < n; i++) {
1004     int j = (start + i) % N;
1005     sum += sum_arr[j];
1006   }
1007   return sum;
1008 }
1009 
1010 void G1CollectorPolicy::print_par_stats(int level,
1011                                         const char* str,
1012                                         double* data) {
1013   double min = data[0], max = data[0];
1014   double total = 0.0;
1015   LineBuffer buf(level);
1016   buf.append("[%s (ms):", str);
1017   for (uint i = 0; i < no_of_gc_threads(); ++i) {
1018     double val = data[i];
1019     if (val < min)
1020       min = val;
1021     if (val > max)
1022       max = val;
1023     total += val;
1024     if (G1Log::finest()) {
1025       buf.append("  %.1lf", val);
1026     }
1027   }
1028 
1029   if (G1Log::finest()) {
1030     buf.append_and_print_cr("");
1031   }
1032   double avg = total / (double) no_of_gc_threads();
1033   buf.append_and_print_cr(" Avg: %.1lf Min: %.1lf Max: %.1lf Diff: %.1lf]",
1034     avg, min, max, max - min);
1035 }
1036 
1037 void G1CollectorPolicy::print_par_sizes(int level,
1038                                         const char* str,
1039                                         double* data) {
1040   double min = data[0], max = data[0];
1041   double total = 0.0;
1042   LineBuffer buf(level);
1043   buf.append("[%s :", str);
1044   for (uint i = 0; i < no_of_gc_threads(); ++i) {
1045     double val = data[i];
1046     if (val < min)
1047       min = val;
1048     if (val > max)
1049       max = val;
1050     total += val;
1051     buf.append(" %d", (int) val);
1052   }
1053   buf.append_and_print_cr("");
1054   double avg = total / (double) no_of_gc_threads();
1055   buf.append_and_print_cr(" Sum: %d, Avg: %d, Min: %d, Max: %d, Diff: %d]",
1056     (int)total, (int)avg, (int)min, (int)max, (int)max - (int)min);
1057 }
1058 
1059 void G1CollectorPolicy::print_stats(int level,
1060                                     const char* str,
1061                                     double value) {
1062   LineBuffer(level).append_and_print_cr("[%s: %5.1lf ms]", str, value);
1063 }
1064 
1065 void G1CollectorPolicy::print_stats(int level,
1066                                     const char* str,
1067                                     int value) {
1068   LineBuffer(level).append_and_print_cr("[%s: %d]", str, value);
1069 }
1070 
1071 double G1CollectorPolicy::avg_value(double* data) {
1072   if (G1CollectedHeap::use_parallel_gc_threads()) {
1073     double ret = 0.0;
1074     for (uint i = 0; i < no_of_gc_threads(); ++i) {
1075       ret += data[i];
1076     }
1077     return ret / (double) no_of_gc_threads();
1078   } else {
1079     return data[0];
1080   }
1081 }
1082 
1083 double G1CollectorPolicy::max_value(double* data) {
1084   if (G1CollectedHeap::use_parallel_gc_threads()) {
1085     double ret = data[0];
1086     for (uint i = 1; i < no_of_gc_threads(); ++i) {
1087       if (data[i] > ret) {
1088         ret = data[i];
1089       }
1090     }
1091     return ret;
1092   } else {
1093     return data[0];
1094   }
1095 }
1096 
1097 double G1CollectorPolicy::sum_of_values(double* data) {
1098   if (G1CollectedHeap::use_parallel_gc_threads()) {
1099     double sum = 0.0;
1100     for (uint i = 0; i < no_of_gc_threads(); i++) {
1101       sum += data[i];
1102     }
1103     return sum;
1104   } else {
1105     return data[0];
1106   }
1107 }
1108 
1109 double G1CollectorPolicy::max_sum(double* data1, double* data2) {
1110   double ret = data1[0] + data2[0];
1111 
1112   if (G1CollectedHeap::use_parallel_gc_threads()) {
1113     for (uint i = 1; i < no_of_gc_threads(); ++i) {
1114       double data = data1[i] + data2[i];
1115       if (data > ret) {
1116         ret = data;
1117       }
1118     }
1119   }
1120   return ret;
1121 }
1122 
1123 bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
1124   if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
1125     return false;
1126   }
1127 
1128   size_t marking_initiating_used_threshold =
1129     (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
1130   size_t cur_used_bytes = _g1->non_young_capacity_bytes();
1131   size_t alloc_byte_size = alloc_word_size * HeapWordSize;
1132 
1133   if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
1134     if (gcs_are_young()) {
1135       ergo_verbose5(ErgoConcCycles,
1136         "request concurrent cycle initiation",
1137         ergo_format_reason("occupancy higher than threshold")
1138         ergo_format_byte("occupancy")
1139         ergo_format_byte("allocation request")
1140         ergo_format_byte_perc("threshold")
1141         ergo_format_str("source"),
1142         cur_used_bytes,
1143         alloc_byte_size,
1144         marking_initiating_used_threshold,
1145         (double) InitiatingHeapOccupancyPercent,
1146         source);
1147       return true;
1148     } else {
1149       ergo_verbose5(ErgoConcCycles,
1150         "do not request concurrent cycle initiation",
1151         ergo_format_reason("still doing mixed collections")
1152         ergo_format_byte("occupancy")
1153         ergo_format_byte("allocation request")
1154         ergo_format_byte_perc("threshold")
1155         ergo_format_str("source"),
1156         cur_used_bytes,
1157         alloc_byte_size,
1158         marking_initiating_used_threshold,
1159         (double) InitiatingHeapOccupancyPercent,
1160         source);
1161     }
1162   }
1163 
1164   return false;
1165 }
1166 
1167 // Anything below that is considered to be zero
1168 #define MIN_TIMER_GRANULARITY 0.0000001
1169 
1170 void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
1171   double end_time_sec = os::elapsedTime();
1172   double elapsed_ms = _last_pause_time_ms;
1173   bool parallel = G1CollectedHeap::use_parallel_gc_threads();
1174   assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
1175          "otherwise, the subtraction below does not make sense");
1176   size_t rs_size =
1177             _cur_collection_pause_used_regions_at_start - cset_region_length();
1178   size_t cur_used_bytes = _g1->used();
1179   assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
1180   bool last_pause_included_initial_mark = false;
1181   bool update_stats = !_g1->evacuation_failed();
1182   set_no_of_gc_threads(no_of_gc_threads);
1183 
1184 #ifndef PRODUCT
1185   if (G1YoungSurvRateVerbose) {
1186     gclog_or_tty->print_cr("");
1187     _short_lived_surv_rate_group->print();
1188     // do that for any other surv rate groups too
1189   }
1190 #endif // PRODUCT
1191 
1192   last_pause_included_initial_mark = during_initial_mark_pause();
1193   if (last_pause_included_initial_mark) {
1194     record_concurrent_mark_init_end(0.0);
1195   } else if (!_last_young_gc && need_to_start_conc_mark("end of GC")) {
1196     // Note: this might have already been set, if during the last
1197     // pause we decided to start a cycle but at the beginning of
1198     // this pause we decided to postpone it. That's OK.
1199     set_initiate_conc_mark_if_possible();
1200   }
1201 
1202   _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
1203                           end_time_sec, false);
1204 
1205   // This assert is exempted when we're doing parallel collection pauses,
1206   // because the fragmentation caused by the parallel GC allocation buffers
1207   // can lead to more memory being used during collection than was used
1208   // before. Best leave this out until the fragmentation problem is fixed.
1209   // Pauses in which evacuation failed can also lead to negative
1210   // collections, since no space is reclaimed from a region containing an
1211   // object whose evacuation failed.
1212   // Further, we're now always doing parallel collection.  But I'm still
1213   // leaving this here as a placeholder for a more precise assertion later.
1214   // (DLD, 10/05.)
1215   assert((true || parallel) // Always using GC LABs now.
1216          || _g1->evacuation_failed()
1217          || _cur_collection_pause_used_at_start_bytes >= cur_used_bytes,
1218          "Negative collection");
1219 
1220   size_t freed_bytes =
1221     _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
1222   size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;
1223 
1224   double survival_fraction =
1225     (double)surviving_bytes/
1226     (double)_collection_set_bytes_used_before;
1227 
1228   // These values are used to update the summary information that is
1229   // displayed when TraceGen0Time is enabled, and are output as part
1230   // of the "finer" output, in the non-parallel case.
1231 
1232   double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
1233   double satb_filtering_time = avg_value(_par_last_satb_filtering_times_ms);
1234   double update_rs_time = avg_value(_par_last_update_rs_times_ms);
1235   double update_rs_processed_buffers =
1236     sum_of_values(_par_last_update_rs_processed_buffers);
1237   double scan_rs_time = avg_value(_par_last_scan_rs_times_ms);
1238   double obj_copy_time = avg_value(_par_last_obj_copy_times_ms);
1239   double termination_time = avg_value(_par_last_termination_times_ms);
1240 
1241   double known_time = ext_root_scan_time +
1242                       satb_filtering_time +
1243                       update_rs_time +
1244                       scan_rs_time +
1245                       obj_copy_time;
1246 
1247   double other_time_ms = elapsed_ms;
1248 
1249   // Subtract the root region scanning wait time. It's initialized to
1250   // zero at the start of the pause.
1251   other_time_ms -= _root_region_scan_wait_time_ms;
1252 
1253   if (parallel) {
1254     other_time_ms -= _cur_collection_par_time_ms;
1255   } else {
1256     other_time_ms -= known_time;
1257   }
1258 
1259   // Now subtract the time taken to fix up roots in generated code
1260   other_time_ms -= _cur_collection_code_root_fixup_time_ms;
1261 
1262   // Subtract the time taken to clean the card table from the
1263   // current value of "other time"
1264   other_time_ms -= _cur_clear_ct_time_ms;
1265 
1266   // TraceGen0Time and TraceGen1Time summary info updating.
1267   _all_pause_times_ms->add(elapsed_ms);
1268 
1269   if (update_stats) {
1270     _summary->record_total_time_ms(elapsed_ms);
1271     _summary->record_other_time_ms(other_time_ms);
1272 
1273     MainBodySummary* body_summary = _summary->main_body_summary();
1274     assert(body_summary != NULL, "should not be null!");
1275 
1276     body_summary->record_root_region_scan_wait_time_ms(
1277                                                _root_region_scan_wait_time_ms);
1278     body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
1279     body_summary->record_satb_filtering_time_ms(satb_filtering_time);
1280     body_summary->record_update_rs_time_ms(update_rs_time);
1281     body_summary->record_scan_rs_time_ms(scan_rs_time);
1282     body_summary->record_obj_copy_time_ms(obj_copy_time);
1283 
1284     if (parallel) {
1285       body_summary->record_parallel_time_ms(_cur_collection_par_time_ms);
1286       body_summary->record_termination_time_ms(termination_time);
1287 
1288       double parallel_known_time = known_time + termination_time;
1289       double parallel_other_time = _cur_collection_par_time_ms - parallel_known_time;
1290       body_summary->record_parallel_other_time_ms(parallel_other_time);
1291     }
1292 
1293     body_summary->record_clear_ct_time_ms(_cur_clear_ct_time_ms);
1294 
1295     // We exempt parallel collection from this check because Alloc Buffer
1296     // fragmentation can produce negative collections.  Same with evac
1297     // failure.
1298     // Further, we're now always doing parallel collection.  But I'm still
1299     // leaving this here as a placeholder for a more precise assertion later.
1300     // (DLD, 10/05.
1301     assert((true || parallel)
1302            || _g1->evacuation_failed()
1303            || surviving_bytes <= _collection_set_bytes_used_before,
1304            "Or else negative collection!");
1305 
1306     // this is where we update the allocation rate of the application
1307     double app_time_ms =
1308       (_cur_collection_start_sec * 1000.0 - _prev_collection_pause_end_ms);
1309     if (app_time_ms < MIN_TIMER_GRANULARITY) {
1310       // This usually happens due to the timer not having the required
1311       // granularity. Some Linuxes are the usual culprits.
1312       // We'll just set it to something (arbitrarily) small.
1313       app_time_ms = 1.0;
1314     }
1315     // We maintain the invariant that all objects allocated by mutator
1316     // threads will be allocated out of eden regions. So, we can use
1317     // the eden region number allocated since the previous GC to
1318     // calculate the application's allocate rate. The only exception
1319     // to that is humongous objects that are allocated separately. But
1320     // given that humongous object allocations do not really affect
1321     // either the pause's duration nor when the next pause will take
1322     // place we can safely ignore them here.
1323     uint regions_allocated = eden_cset_region_length();
1324     double alloc_rate_ms = (double) regions_allocated / app_time_ms;
1325     _alloc_rate_ms_seq->add(alloc_rate_ms);
1326 
1327     double interval_ms =
1328       (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
1329     update_recent_gc_times(end_time_sec, elapsed_ms);
1330     _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
1331     if (recent_avg_pause_time_ratio() < 0.0 ||
1332         (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
1333 #ifndef PRODUCT
1334       // Dump info to allow post-facto debugging
1335       gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
1336       gclog_or_tty->print_cr("-------------------------------------------");
1337       gclog_or_tty->print_cr("Recent GC Times (ms):");
1338       _recent_gc_times_ms->dump();
1339       gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
1340       _recent_prev_end_times_for_all_gcs_sec->dump();
1341       gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
1342                              _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
1343       // In debug mode, terminate the JVM if the user wants to debug at this point.
1344       assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
1345 #endif  // !PRODUCT
1346       // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
1347       // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
1348       if (_recent_avg_pause_time_ratio < 0.0) {
1349         _recent_avg_pause_time_ratio = 0.0;
1350       } else {
1351         assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
1352         _recent_avg_pause_time_ratio = 1.0;
1353       }
1354     }
1355   }
1356 
1357   for (int i = 0; i < _aux_num; ++i) {
1358     if (_cur_aux_times_set[i]) {
1359       _all_aux_times_ms[i].add(_cur_aux_times_ms[i]);
1360     }
1361   }
1362 
1363   if (G1Log::finer()) {
1364     bool print_marking_info =
1365       _g1->mark_in_progress() && !last_pause_included_initial_mark;
1366 
1367     gclog_or_tty->print_cr("%s, %1.8lf secs]",
1368                            (last_pause_included_initial_mark) ? " (initial-mark)" : "",
1369                            elapsed_ms / 1000.0);
1370 
1371     if (_root_region_scan_wait_time_ms > 0.0) {
1372       print_stats(1, "Root Region Scan Waiting", _root_region_scan_wait_time_ms);
1373     }
1374     if (parallel) {
1375       print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
1376       print_par_stats(2, "GC Worker Start", _par_last_gc_worker_start_times_ms);
1377       print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
1378       if (print_marking_info) {
1379         print_par_stats(2, "SATB Filtering", _par_last_satb_filtering_times_ms);
1380       }
1381       print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
1382       if (G1Log::finest()) {
1383         print_par_sizes(3, "Processed Buffers", _par_last_update_rs_processed_buffers);
1384       }
1385       print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
1386       print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
1387       print_par_stats(2, "Termination", _par_last_termination_times_ms);
1388       if (G1Log::finest()) {
1389         print_par_sizes(3, "Termination Attempts", _par_last_termination_attempts);
1390       }
1391 
1392       for (int i = 0; i < _parallel_gc_threads; i++) {
1393         _par_last_gc_worker_times_ms[i] = _par_last_gc_worker_end_times_ms[i] -
1394                                           _par_last_gc_worker_start_times_ms[i];
1395 
1396         double worker_known_time = _par_last_ext_root_scan_times_ms[i] +
1397                                    _par_last_satb_filtering_times_ms[i] +
1398                                    _par_last_update_rs_times_ms[i] +
1399                                    _par_last_scan_rs_times_ms[i] +
1400                                    _par_last_obj_copy_times_ms[i] +
1401                                    _par_last_termination_times_ms[i];
1402 
1403         _par_last_gc_worker_other_times_ms[i] = _par_last_gc_worker_times_ms[i] -
1404                                                 worker_known_time;
1405       }
1406 
1407       print_par_stats(2, "GC Worker Other", _par_last_gc_worker_other_times_ms);
1408       print_par_stats(2, "GC Worker Total", _par_last_gc_worker_times_ms);
1409       print_par_stats(2, "GC Worker End", _par_last_gc_worker_end_times_ms);
1410     } else {
1411       print_stats(1, "Ext Root Scanning", ext_root_scan_time);
1412       if (print_marking_info) {
1413         print_stats(1, "SATB Filtering", satb_filtering_time);
1414       }
1415       print_stats(1, "Update RS", update_rs_time);
1416       if (G1Log::finest()) {
1417         print_stats(2, "Processed Buffers", (int)update_rs_processed_buffers);
1418       }
1419       print_stats(1, "Scan RS", scan_rs_time);
1420       print_stats(1, "Object Copying", obj_copy_time);
1421     }
1422     print_stats(1, "Code Root Fixup", _cur_collection_code_root_fixup_time_ms);
1423     print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
1424 #ifndef PRODUCT
1425     print_stats(1, "Cur Clear CC", _cur_clear_cc_time_ms);
1426     print_stats(1, "Cum Clear CC", _cum_clear_cc_time_ms);
1427     print_stats(1, "Min Clear CC", _min_clear_cc_time_ms);
1428     print_stats(1, "Max Clear CC", _max_clear_cc_time_ms);
1429     if (_num_cc_clears > 0) {
1430       print_stats(1, "Avg Clear CC", _cum_clear_cc_time_ms / ((double)_num_cc_clears));
1431     }
1432 #endif
1433     print_stats(1, "Other", other_time_ms);
1434     print_stats(2, "Choose CSet",
1435                    (_recorded_young_cset_choice_time_ms +
1436                     _recorded_non_young_cset_choice_time_ms));
1437     print_stats(2, "Ref Proc", _cur_ref_proc_time_ms);
1438     print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);
1439     print_stats(2, "Free CSet",
1440                    (_recorded_young_free_cset_time_ms +
1441                     _recorded_non_young_free_cset_time_ms));
1442 
1443     for (int i = 0; i < _aux_num; ++i) {
1444       if (_cur_aux_times_set[i]) {
1445         char buffer[96];
1446         sprintf(buffer, "Aux%d", i);
1447         print_stats(1, buffer, _cur_aux_times_ms[i]);
1448       }
1449     }
1450   }
1451 
1452   bool new_in_marking_window = _in_marking_window;
1453   bool new_in_marking_window_im = false;
1454   if (during_initial_mark_pause()) {
1455     new_in_marking_window = true;
1456     new_in_marking_window_im = true;
1457   }
1458 
1459   if (_last_young_gc) {
1460     // This is supposed to to be the "last young GC" before we start
1461     // doing mixed GCs. Here we decide whether to start mixed GCs or not.
1462 
1463     if (!last_pause_included_initial_mark) {
1464       if (next_gc_should_be_mixed("start mixed GCs",
1465                                   "do not start mixed GCs")) {
1466         set_gcs_are_young(false);
1467       }
1468     } else {
1469       ergo_verbose0(ErgoMixedGCs,
1470                     "do not start mixed GCs",
1471                     ergo_format_reason("concurrent cycle is about to start"));
1472     }
1473     _last_young_gc = false;
1474   }
1475 
1476   if (!_last_gc_was_young) {
1477     // This is a mixed GC. Here we decide whether to continue doing
1478     // mixed GCs or not.
1479 
1480     if (!next_gc_should_be_mixed("continue mixed GCs",
1481                                  "do not continue mixed GCs")) {
1482       set_gcs_are_young(true);
1483     }
1484   }
1485 
1486   _short_lived_surv_rate_group->start_adding_regions();
1487   // do that for any other surv rate groupsx
1488 
1489   if (update_stats) {
1490     double pause_time_ms = elapsed_ms;
1491 
1492     size_t diff = 0;
1493     if (_max_pending_cards >= _pending_cards) {
1494       diff = _max_pending_cards - _pending_cards;
1495     }
1496     _pending_card_diff_seq->add((double) diff);
1497 
1498     double cost_per_card_ms = 0.0;
1499     if (_pending_cards > 0) {
1500       cost_per_card_ms = update_rs_time / (double) _pending_cards;
1501       _cost_per_card_ms_seq->add(cost_per_card_ms);
1502     }
1503 
1504     size_t cards_scanned = _g1->cards_scanned();
1505 
1506     double cost_per_entry_ms = 0.0;
1507     if (cards_scanned > 10) {
1508       cost_per_entry_ms = scan_rs_time / (double) cards_scanned;
1509       if (_last_gc_was_young) {
1510         _cost_per_entry_ms_seq->add(cost_per_entry_ms);
1511       } else {
1512         _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
1513       }
1514     }
1515 
1516     if (_max_rs_lengths > 0) {
1517       double cards_per_entry_ratio =
1518         (double) cards_scanned / (double) _max_rs_lengths;
1519       if (_last_gc_was_young) {
1520         _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
1521       } else {
1522         _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
1523       }
1524     }
1525 
1526     // This is defensive. For a while _max_rs_lengths could get
1527     // smaller than _recorded_rs_lengths which was causing
1528     // rs_length_diff to get very large and mess up the RSet length
1529     // predictions. The reason was unsafe concurrent updates to the
1530     // _inc_cset_recorded_rs_lengths field which the code below guards
1531     // against (see CR 7118202). This bug has now been fixed (see CR
1532     // 7119027). However, I'm still worried that
1533     // _inc_cset_recorded_rs_lengths might still end up somewhat
1534     // inaccurate. The concurrent refinement thread calculates an
1535     // RSet's length concurrently with other CR threads updating it
1536     // which might cause it to calculate the length incorrectly (if,
1537     // say, it's in mid-coarsening). So I'll leave in the defensive
1538     // conditional below just in case.
1539     size_t rs_length_diff = 0;
1540     if (_max_rs_lengths > _recorded_rs_lengths) {
1541       rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
1542     }
1543     _rs_length_diff_seq->add((double) rs_length_diff);
1544 
1545     size_t copied_bytes = surviving_bytes;
1546     double cost_per_byte_ms = 0.0;
1547     if (copied_bytes > 0) {
1548       cost_per_byte_ms = obj_copy_time / (double) copied_bytes;
1549       if (_in_marking_window) {
1550         _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
1551       } else {
1552         _cost_per_byte_ms_seq->add(cost_per_byte_ms);
1553       }
1554     }
1555 
1556     double all_other_time_ms = pause_time_ms -
1557       (update_rs_time + scan_rs_time + obj_copy_time + termination_time);
1558 
1559     double young_other_time_ms = 0.0;
1560     if (young_cset_region_length() > 0) {
1561       young_other_time_ms =
1562         _recorded_young_cset_choice_time_ms +
1563         _recorded_young_free_cset_time_ms;
1564       _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
1565                                           (double) young_cset_region_length());
1566     }
1567     double non_young_other_time_ms = 0.0;
1568     if (old_cset_region_length() > 0) {
1569       non_young_other_time_ms =
1570         _recorded_non_young_cset_choice_time_ms +
1571         _recorded_non_young_free_cset_time_ms;
1572 
1573       _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
1574                                             (double) old_cset_region_length());
1575     }
1576 
1577     double constant_other_time_ms = all_other_time_ms -
1578       (young_other_time_ms + non_young_other_time_ms);
1579     _constant_other_time_ms_seq->add(constant_other_time_ms);
1580 
1581     double survival_ratio = 0.0;
1582     if (_bytes_in_collection_set_before_gc > 0) {
1583       survival_ratio = (double) _bytes_copied_during_gc /
1584                                    (double) _bytes_in_collection_set_before_gc;
1585     }
1586 
1587     _pending_cards_seq->add((double) _pending_cards);
1588     _rs_lengths_seq->add((double) _max_rs_lengths);
1589   }
1590 
1591   _in_marking_window = new_in_marking_window;
1592   _in_marking_window_im = new_in_marking_window_im;
1593   _free_regions_at_end_of_collection = _g1->free_regions();
1594   update_young_list_target_length();
1595 
1596   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
1597   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
1598   adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
1599 
1600   _collectionSetChooser->verify();
1601 }
1602 
1603 #define EXT_SIZE_FORMAT "%d%s"
1604 #define EXT_SIZE_PARAMS(bytes)                                  \
1605   byte_size_in_proper_unit((bytes)),                            \
1606   proper_unit_for_byte_size((bytes))
1607 
1608 void G1CollectorPolicy::print_heap_transition() {
1609   if (G1Log::finer()) {
1610     YoungList* young_list = _g1->young_list();
1611     size_t eden_bytes = young_list->eden_used_bytes();
1612     size_t survivor_bytes = young_list->survivor_used_bytes();
1613     size_t used_before_gc = _cur_collection_pause_used_at_start_bytes;
1614     size_t used = _g1->used();
1615     size_t capacity = _g1->capacity();
1616     size_t eden_capacity =
1617       (_young_list_target_length * HeapRegion::GrainBytes) - survivor_bytes;
1618 
1619     gclog_or_tty->print_cr(
1620       "   [Eden: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT") "
1621       "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
1622       "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
1623       EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
1624       EXT_SIZE_PARAMS(_eden_bytes_before_gc),
1625       EXT_SIZE_PARAMS(_prev_eden_capacity),
1626       EXT_SIZE_PARAMS(eden_bytes),
1627       EXT_SIZE_PARAMS(eden_capacity),
1628       EXT_SIZE_PARAMS(_survivor_bytes_before_gc),
1629       EXT_SIZE_PARAMS(survivor_bytes),
1630       EXT_SIZE_PARAMS(used_before_gc),
1631       EXT_SIZE_PARAMS(_capacity_before_gc),
1632       EXT_SIZE_PARAMS(used),
1633       EXT_SIZE_PARAMS(capacity));
1634 
1635     _prev_eden_capacity = eden_capacity;
1636   } else if (G1Log::fine()) {
1637     _g1->print_size_transition(gclog_or_tty,
1638                                _cur_collection_pause_used_at_start_bytes,
1639                                _g1->used(), _g1->capacity());
1640   }
1641 }
1642 
1643 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
1644                                                      double update_rs_processed_buffers,
1645                                                      double goal_ms) {
1646   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
1647   ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
1648 
1649   if (G1UseAdaptiveConcRefinement) {
1650     const int k_gy = 3, k_gr = 6;
1651     const double inc_k = 1.1, dec_k = 0.9;
1652 
1653     int g = cg1r->green_zone();
1654     if (update_rs_time > goal_ms) {
1655       g = (int)(g * dec_k);  // Can become 0, that's OK. That would mean a mutator-only processing.
1656     } else {
1657       if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
1658         g = (int)MAX2(g * inc_k, g + 1.0);
1659       }
1660     }
1661     // Change the refinement threads params
1662     cg1r->set_green_zone(g);
1663     cg1r->set_yellow_zone(g * k_gy);
1664     cg1r->set_red_zone(g * k_gr);
1665     cg1r->reinitialize_threads();
1666 
1667     int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
1668     int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
1669                                     cg1r->yellow_zone());
1670     // Change the barrier params
1671     dcqs.set_process_completed_threshold(processing_threshold);
1672     dcqs.set_max_completed_queue(cg1r->red_zone());
1673   }
1674 
1675   int curr_queue_size = dcqs.completed_buffers_num();
1676   if (curr_queue_size >= cg1r->yellow_zone()) {
1677     dcqs.set_completed_queue_padding(curr_queue_size);
1678   } else {
1679     dcqs.set_completed_queue_padding(0);
1680   }
1681   dcqs.notify_if_necessary();
1682 }
1683 
1684 double
1685 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
1686   size_t rs_length = predict_rs_length_diff();
1687   size_t card_num;
1688   if (gcs_are_young()) {
1689     card_num = predict_young_card_num(rs_length);
1690   } else {
1691     card_num = predict_non_young_card_num(rs_length);
1692   }
1693   return predict_base_elapsed_time_ms(pending_cards, card_num);
1694 }
1695 
1696 double
1697 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
1698                                                 size_t scanned_cards) {
1699   return
1700     predict_rs_update_time_ms(pending_cards) +
1701     predict_rs_scan_time_ms(scanned_cards) +
1702     predict_constant_other_time_ms();
1703 }
1704 
1705 double
1706 G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
1707                                                   bool young) {
1708   size_t rs_length = hr->rem_set()->occupied();
1709   size_t card_num;
1710   if (gcs_are_young()) {
1711     card_num = predict_young_card_num(rs_length);
1712   } else {
1713     card_num = predict_non_young_card_num(rs_length);
1714   }
1715   size_t bytes_to_copy = predict_bytes_to_copy(hr);
1716 
1717   double region_elapsed_time_ms =
1718     predict_rs_scan_time_ms(card_num) +
1719     predict_object_copy_time_ms(bytes_to_copy);
1720 
1721   if (young)
1722     region_elapsed_time_ms += predict_young_other_time_ms(1);
1723   else
1724     region_elapsed_time_ms += predict_non_young_other_time_ms(1);
1725 
1726   return region_elapsed_time_ms;
1727 }
1728 
1729 size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
1730   size_t bytes_to_copy;
1731   if (hr->is_marked())
1732     bytes_to_copy = hr->max_live_bytes();
1733   else {
1734     assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
1735     int age = hr->age_in_surv_rate_group();
1736     double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
1737     bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
1738   }
1739   return bytes_to_copy;
1740 }
1741 
1742 void
1743 G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length,
1744                                             uint survivor_cset_region_length) {
1745   _eden_cset_region_length     = eden_cset_region_length;
1746   _survivor_cset_region_length = survivor_cset_region_length;
1747   _old_cset_region_length      = 0;
1748 }
1749 
1750 void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
1751   _recorded_rs_lengths = rs_lengths;
1752 }
1753 
1754 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
1755                                                double elapsed_ms) {
1756   _recent_gc_times_ms->add(elapsed_ms);
1757   _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
1758   _prev_collection_pause_end_ms = end_time_sec * 1000.0;
1759 }
1760 
1761 size_t G1CollectorPolicy::expansion_amount() {
1762   double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
1763   double threshold = _gc_overhead_perc;
1764   if (recent_gc_overhead > threshold) {
1765     // We will double the existing space, or take
1766     // G1ExpandByPercentOfAvailable % of the available expansion
1767     // space, whichever is smaller, bounded below by a minimum
1768     // expansion (unless that's all that's left.)
1769     const size_t min_expand_bytes = 1*M;
1770     size_t reserved_bytes = _g1->max_capacity();
1771     size_t committed_bytes = _g1->capacity();
1772     size_t uncommitted_bytes = reserved_bytes - committed_bytes;
1773     size_t expand_bytes;
1774     size_t expand_bytes_via_pct =
1775       uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
1776     expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
1777     expand_bytes = MAX2(expand_bytes, min_expand_bytes);
1778     expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
1779 
1780     ergo_verbose5(ErgoHeapSizing,
1781                   "attempt heap expansion",
1782                   ergo_format_reason("recent GC overhead higher than "
1783                                      "threshold after GC")
1784                   ergo_format_perc("recent GC overhead")
1785                   ergo_format_perc("threshold")
1786                   ergo_format_byte("uncommitted")
1787                   ergo_format_byte_perc("calculated expansion amount"),
1788                   recent_gc_overhead, threshold,
1789                   uncommitted_bytes,
1790                   expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);
1791 
1792     return expand_bytes;
1793   } else {
1794     return 0;
1795   }
1796 }
1797 
1798 class CountCSClosure: public HeapRegionClosure {
1799   G1CollectorPolicy* _g1_policy;
1800 public:
1801   CountCSClosure(G1CollectorPolicy* g1_policy) :
1802     _g1_policy(g1_policy) {}
1803   bool doHeapRegion(HeapRegion* r) {
1804     _g1_policy->_bytes_in_collection_set_before_gc += r->used();
1805     return false;
1806   }
1807 };
1808 
1809 void G1CollectorPolicy::count_CS_bytes_used() {
1810   CountCSClosure cs_closure(this);
1811   _g1->collection_set_iterate(&cs_closure);
1812 }
1813 
1814 void G1CollectorPolicy::print_summary(int level,
1815                                       const char* str,
1816                                       NumberSeq* seq) const {
1817   double sum = seq->sum();
1818   LineBuffer(level + 1).append_and_print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)",
1819                 str, sum / 1000.0, seq->avg());
1820 }
1821 
1822 void G1CollectorPolicy::print_summary_sd(int level,
1823                                          const char* str,
1824                                          NumberSeq* seq) const {
1825   print_summary(level, str, seq);
1826   LineBuffer(level + 6).append_and_print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
1827                 seq->num(), seq->sd(), seq->maximum());
1828 }
1829 
1830 void G1CollectorPolicy::check_other_times(int level,
1831                                         NumberSeq* other_times_ms,
1832                                         NumberSeq* calc_other_times_ms) const {
1833   bool should_print = false;
1834   LineBuffer buf(level + 2);
1835 
1836   double max_sum = MAX2(fabs(other_times_ms->sum()),
1837                         fabs(calc_other_times_ms->sum()));
1838   double min_sum = MIN2(fabs(other_times_ms->sum()),
1839                         fabs(calc_other_times_ms->sum()));
1840   double sum_ratio = max_sum / min_sum;
1841   if (sum_ratio > 1.1) {
1842     should_print = true;
1843     buf.append_and_print_cr("## CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###");
1844   }
1845 
1846   double max_avg = MAX2(fabs(other_times_ms->avg()),
1847                         fabs(calc_other_times_ms->avg()));
1848   double min_avg = MIN2(fabs(other_times_ms->avg()),
1849                         fabs(calc_other_times_ms->avg()));
1850   double avg_ratio = max_avg / min_avg;
1851   if (avg_ratio > 1.1) {
1852     should_print = true;
1853     buf.append_and_print_cr("## CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###");
1854   }
1855 
1856   if (other_times_ms->sum() < -0.01) {
1857     buf.append_and_print_cr("## RECORDED OTHER SUM IS NEGATIVE ###");
1858   }
1859 
1860   if (other_times_ms->avg() < -0.01) {
1861     buf.append_and_print_cr("## RECORDED OTHER AVG IS NEGATIVE ###");
1862   }
1863 
1864   if (calc_other_times_ms->sum() < -0.01) {
1865     should_print = true;
1866     buf.append_and_print_cr("## CALCULATED OTHER SUM IS NEGATIVE ###");
1867   }
1868 
1869   if (calc_other_times_ms->avg() < -0.01) {
1870     should_print = true;
1871     buf.append_and_print_cr("## CALCULATED OTHER AVG IS NEGATIVE ###");
1872   }
1873 
1874   if (should_print)
1875     print_summary(level, "Other(Calc)", calc_other_times_ms);
1876 }
1877 
1878 void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
1879   bool parallel = G1CollectedHeap::use_parallel_gc_threads();
1880   MainBodySummary*    body_summary = summary->main_body_summary();
1881   if (summary->get_total_seq()->num() > 0) {
1882     print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq());
1883     if (body_summary != NULL) {
1884       print_summary(1, "Root Region Scan Wait", body_summary->get_root_region_scan_wait_seq());
1885       if (parallel) {
1886         print_summary(1, "Parallel Time", body_summary->get_parallel_seq());
1887         print_summary(2, "Ext Root Scanning", body_summary->get_ext_root_scan_seq());
1888         print_summary(2, "SATB Filtering", body_summary->get_satb_filtering_seq());
1889         print_summary(2, "Update RS", body_summary->get_update_rs_seq());
1890         print_summary(2, "Scan RS", body_summary->get_scan_rs_seq());
1891         print_summary(2, "Object Copy", body_summary->get_obj_copy_seq());
1892         print_summary(2, "Termination", body_summary->get_termination_seq());
1893         print_summary(2, "Parallel Other", body_summary->get_parallel_other_seq());
1894         {
1895           NumberSeq* other_parts[] = {
1896             body_summary->get_ext_root_scan_seq(),
1897             body_summary->get_satb_filtering_seq(),
1898             body_summary->get_update_rs_seq(),
1899             body_summary->get_scan_rs_seq(),
1900             body_summary->get_obj_copy_seq(),
1901             body_summary->get_termination_seq()
1902           };
1903           NumberSeq calc_other_times_ms(body_summary->get_parallel_seq(),
1904                                         6, other_parts);
1905           check_other_times(2, body_summary->get_parallel_other_seq(),
1906                             &calc_other_times_ms);
1907         }
1908       } else {
1909         print_summary(1, "Ext Root Scanning", body_summary->get_ext_root_scan_seq());
1910         print_summary(1, "SATB Filtering", body_summary->get_satb_filtering_seq());
1911         print_summary(1, "Update RS", body_summary->get_update_rs_seq());
1912         print_summary(1, "Scan RS", body_summary->get_scan_rs_seq());
1913         print_summary(1, "Object Copy", body_summary->get_obj_copy_seq());
1914       }
1915     }
1916     print_summary(1, "Clear CT", body_summary->get_clear_ct_seq());
1917     print_summary(1, "Other", summary->get_other_seq());
1918     {
1919       if (body_summary != NULL) {
1920         NumberSeq calc_other_times_ms;
1921         if (parallel) {
1922           // parallel
1923           NumberSeq* other_parts[] = {
1924             body_summary->get_root_region_scan_wait_seq(),
1925             body_summary->get_parallel_seq(),
1926             body_summary->get_clear_ct_seq()
1927           };
1928           calc_other_times_ms = NumberSeq(summary->get_total_seq(),
1929                                           3, other_parts);
1930         } else {
1931           // serial
1932           NumberSeq* other_parts[] = {
1933             body_summary->get_root_region_scan_wait_seq(),
1934             body_summary->get_update_rs_seq(),
1935             body_summary->get_ext_root_scan_seq(),
1936             body_summary->get_satb_filtering_seq(),
1937             body_summary->get_scan_rs_seq(),
1938             body_summary->get_obj_copy_seq()
1939           };
1940           calc_other_times_ms = NumberSeq(summary->get_total_seq(),
1941                                           6, other_parts);
1942         }
1943         check_other_times(1,  summary->get_other_seq(), &calc_other_times_ms);
1944       }
1945     }
1946   } else {
1947     LineBuffer(1).append_and_print_cr("none");
1948   }
1949   LineBuffer(0).append_and_print_cr("");
1950 }
1951 
1952 void G1CollectorPolicy::print_tracing_info() const {
1953   if (TraceGen0Time) {
1954     gclog_or_tty->print_cr("ALL PAUSES");
1955     print_summary_sd(0, "Total", _all_pause_times_ms);
1956     gclog_or_tty->print_cr("");
1957     gclog_or_tty->print_cr("");
1958     gclog_or_tty->print_cr("   Young GC Pauses: %8d", _young_pause_num);
1959     gclog_or_tty->print_cr("   Mixed GC Pauses: %8d", _mixed_pause_num);
1960     gclog_or_tty->print_cr("");
1961 
1962     gclog_or_tty->print_cr("EVACUATION PAUSES");
1963     print_summary(_summary);
1964 
1965     gclog_or_tty->print_cr("MISC");
1966     print_summary_sd(0, "Stop World", _all_stop_world_times_ms);
1967     print_summary_sd(0, "Yields", _all_yield_times_ms);
1968     for (int i = 0; i < _aux_num; ++i) {
1969       if (_all_aux_times_ms[i].num() > 0) {
1970         char buffer[96];
1971         sprintf(buffer, "Aux%d", i);
1972         print_summary_sd(0, buffer, &_all_aux_times_ms[i]);
1973       }
1974     }
1975   }
1976   if (TraceGen1Time) {
1977     if (_all_full_gc_times_ms->num() > 0) {
1978       gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
1979                  _all_full_gc_times_ms->num(),
1980                  _all_full_gc_times_ms->sum() / 1000.0);
1981       gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times_ms->avg());
1982       gclog_or_tty->print_cr("                     [std. dev = %8.2f ms, max = %8.2f ms]",
1983                     _all_full_gc_times_ms->sd(),
1984                     _all_full_gc_times_ms->maximum());
1985     }
1986   }
1987 }
1988 
1989 void G1CollectorPolicy::print_yg_surv_rate_info() const {
1990 #ifndef PRODUCT
1991   _short_lived_surv_rate_group->print_surv_rate_summary();
1992   // add this call for any other surv rate groups
1993 #endif // PRODUCT
1994 }
1995 
1996 #ifndef PRODUCT
1997 // for debugging, bit of a hack...
1998 static char*
1999 region_num_to_mbs(int length) {
2000   static char buffer[64];
2001   double bytes = (double) (length * HeapRegion::GrainBytes);
2002   double mbs = bytes / (double) (1024 * 1024);
2003   sprintf(buffer, "%7.2lfMB", mbs);
2004   return buffer;
2005 }
2006 #endif // PRODUCT
2007 
2008 uint G1CollectorPolicy::max_regions(int purpose) {
2009   switch (purpose) {
2010     case GCAllocForSurvived:
2011       return _max_survivor_regions;
2012     case GCAllocForTenured:
2013       return REGIONS_UNLIMITED;
2014     default:
2015       ShouldNotReachHere();
2016       return REGIONS_UNLIMITED;
2017   };
2018 }
2019 
2020 void G1CollectorPolicy::update_max_gc_locker_expansion() {
2021   uint expansion_region_num = 0;
2022   if (GCLockerEdenExpansionPercent > 0) {
2023     double perc = (double) GCLockerEdenExpansionPercent / 100.0;
2024     double expansion_region_num_d = perc * (double) _young_list_target_length;
2025     // We use ceiling so that if expansion_region_num_d is > 0.0 (but
2026     // less than 1.0) we'll get 1.
2027     expansion_region_num = (uint) ceil(expansion_region_num_d);
2028   } else {
2029     assert(expansion_region_num == 0, "sanity");
2030   }
2031   _young_list_max_length = _young_list_target_length + expansion_region_num;
2032   assert(_young_list_target_length <= _young_list_max_length, "post-condition");
2033 }
2034 
2035 // Calculates survivor space parameters.
2036 void G1CollectorPolicy::update_survivors_policy() {
2037   double max_survivor_regions_d =
2038                  (double) _young_list_target_length / (double) SurvivorRatio;
2039   // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
2040   // smaller than 1.0) we'll get 1.
2041   _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
2042 
2043   _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
2044         HeapRegion::GrainWords * _max_survivor_regions);
2045 }
2046 
2047 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
2048                                                      GCCause::Cause gc_cause) {
2049   bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
2050   if (!during_cycle) {
2051     ergo_verbose1(ErgoConcCycles,
2052                   "request concurrent cycle initiation",
2053                   ergo_format_reason("requested by GC cause")
2054                   ergo_format_str("GC cause"),
2055                   GCCause::to_string(gc_cause));
2056     set_initiate_conc_mark_if_possible();
2057     return true;
2058   } else {
2059     ergo_verbose1(ErgoConcCycles,
2060                   "do not request concurrent cycle initiation",
2061                   ergo_format_reason("concurrent cycle already in progress")
2062                   ergo_format_str("GC cause"),
2063                   GCCause::to_string(gc_cause));
2064     return false;
2065   }
2066 }
2067 
2068 void
2069 G1CollectorPolicy::decide_on_conc_mark_initiation() {
2070   // We are about to decide on whether this pause will be an
2071   // initial-mark pause.
2072 
2073   // First, during_initial_mark_pause() should not be already set. We
2074   // will set it here if we have to. However, it should be cleared by
2075   // the end of the pause (it's only set for the duration of an
2076   // initial-mark pause).
2077   assert(!during_initial_mark_pause(), "pre-condition");
2078 
2079   if (initiate_conc_mark_if_possible()) {
2080     // We had noticed on a previous pause that the heap occupancy has
2081     // gone over the initiating threshold and we should start a
2082     // concurrent marking cycle. So we might initiate one.
2083 
2084     bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
2085     if (!during_cycle) {
2086       // The concurrent marking thread is not "during a cycle", i.e.,
2087       // it has completed the last one. So we can go ahead and
2088       // initiate a new cycle.
2089 
2090       set_during_initial_mark_pause();
2091       // We do not allow mixed GCs during marking.
2092       if (!gcs_are_young()) {
2093         set_gcs_are_young(true);
2094         ergo_verbose0(ErgoMixedGCs,
2095                       "end mixed GCs",
2096                       ergo_format_reason("concurrent cycle is about to start"));
2097       }
2098 
2099       // And we can now clear initiate_conc_mark_if_possible() as
2100       // we've already acted on it.
2101       clear_initiate_conc_mark_if_possible();
2102 
2103       ergo_verbose0(ErgoConcCycles,
2104                   "initiate concurrent cycle",
2105                   ergo_format_reason("concurrent cycle initiation requested"));
2106     } else {
2107       // The concurrent marking thread is still finishing up the
2108       // previous cycle. If we start one right now the two cycles
2109       // overlap. In particular, the concurrent marking thread might
2110       // be in the process of clearing the next marking bitmap (which
2111       // we will use for the next cycle if we start one). Starting a
2112       // cycle now will be bad given that parts of the marking
2113       // information might get cleared by the marking thread. And we
2114       // cannot wait for the marking thread to finish the cycle as it
2115       // periodically yields while clearing the next marking bitmap
2116       // and, if it's in a yield point, it's waiting for us to
2117       // finish. So, at this point we will not start a cycle and we'll
2118       // let the concurrent marking thread complete the last one.
2119       ergo_verbose0(ErgoConcCycles,
2120                     "do not initiate concurrent cycle",
2121                     ergo_format_reason("concurrent cycle already in progress"));
2122     }
2123   }
2124 }
2125 
2126 class KnownGarbageClosure: public HeapRegionClosure {
2127   G1CollectedHeap* _g1h;
2128   CollectionSetChooser* _hrSorted;
2129 
2130 public:
2131   KnownGarbageClosure(CollectionSetChooser* hrSorted) :
2132     _g1h(G1CollectedHeap::heap()), _hrSorted(hrSorted) { }
2133 
2134   bool doHeapRegion(HeapRegion* r) {
2135     // We only include humongous regions in collection
2136     // sets when concurrent mark shows that their contained object is
2137     // unreachable.
2138 
2139     // Do we have any marking information for this region?
2140     if (r->is_marked()) {
2141       // We will skip any region that's currently used as an old GC
2142       // alloc region (we should not consider those for collection
2143       // before we fill them up).
2144       if (_hrSorted->should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
2145         _hrSorted->add_region(r);
2146       }
2147     }
2148     return false;
2149   }
2150 };
2151 
2152 class ParKnownGarbageHRClosure: public HeapRegionClosure {
2153   G1CollectedHeap* _g1h;
2154   CollectionSetChooser* _hrSorted;
2155   uint _marked_regions_added;
2156   size_t _reclaimable_bytes_added;
2157   uint _chunk_size;
2158   uint _cur_chunk_idx;
2159   uint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
2160 
2161   void get_new_chunk() {
2162     _cur_chunk_idx = _hrSorted->claim_array_chunk(_chunk_size);
2163     _cur_chunk_end = _cur_chunk_idx + _chunk_size;
2164   }
2165   void add_region(HeapRegion* r) {
2166     if (_cur_chunk_idx == _cur_chunk_end) {
2167       get_new_chunk();
2168     }
2169     assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
2170     _hrSorted->set_region(_cur_chunk_idx, r);
2171     _marked_regions_added++;
2172     _reclaimable_bytes_added += r->reclaimable_bytes();
2173     _cur_chunk_idx++;
2174   }
2175 
2176 public:
2177   ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
2178                            uint chunk_size) :
2179       _g1h(G1CollectedHeap::heap()),
2180       _hrSorted(hrSorted), _chunk_size(chunk_size),
2181       _marked_regions_added(0), _reclaimable_bytes_added(0),
2182       _cur_chunk_idx(0), _cur_chunk_end(0) { }
2183 
2184   bool doHeapRegion(HeapRegion* r) {
2185     // Do we have any marking information for this region?
2186     if (r->is_marked()) {
2187       // We will skip any region that's currently used as an old GC
2188       // alloc region (we should not consider those for collection
2189       // before we fill them up).
2190       if (_hrSorted->should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
2191         add_region(r);
2192       }
2193     }
2194     return false;
2195   }
2196   uint marked_regions_added() { return _marked_regions_added; }
2197   size_t reclaimable_bytes_added() { return _reclaimable_bytes_added; }
2198 };
2199 
2200 class ParKnownGarbageTask: public AbstractGangTask {
2201   CollectionSetChooser* _hrSorted;
2202   uint _chunk_size;
2203   G1CollectedHeap* _g1;
2204 public:
2205   ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size) :
2206     AbstractGangTask("ParKnownGarbageTask"),
2207     _hrSorted(hrSorted), _chunk_size(chunk_size),
2208     _g1(G1CollectedHeap::heap()) { }
2209 
2210   void work(uint worker_id) {
2211     ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);
2212 
2213     // Back to zero for the claim value.
2214     _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, worker_id,
2215                                          _g1->workers()->active_workers(),
2216                                          HeapRegion::InitialClaimValue);
2217     uint regions_added = parKnownGarbageCl.marked_regions_added();
2218     size_t reclaimable_bytes_added =
2219                                    parKnownGarbageCl.reclaimable_bytes_added();
2220     _hrSorted->update_totals(regions_added, reclaimable_bytes_added);
2221   }
2222 };
2223 
2224 void
2225 G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
2226   _collectionSetChooser->clear();
2227 
2228   uint region_num = _g1->n_regions();
2229   if (G1CollectedHeap::use_parallel_gc_threads()) {
2230     const uint OverpartitionFactor = 4;
2231     uint WorkUnit;
2232     // The use of MinChunkSize = 8 in the original code
2233     // causes some assertion failures when the total number of
2234     // region is less than 8.  The code here tries to fix that.
2235     // Should the original code also be fixed?
2236     if (no_of_gc_threads > 0) {
2237       const uint MinWorkUnit = MAX2(region_num / no_of_gc_threads, 1U);
2238       WorkUnit = MAX2(region_num / (no_of_gc_threads * OverpartitionFactor),
2239                       MinWorkUnit);
2240     } else {
2241       assert(no_of_gc_threads > 0,
2242         "The active gc workers should be greater than 0");
2243       // In a product build do something reasonable to avoid a crash.
2244       const uint MinWorkUnit = MAX2(region_num / (uint) ParallelGCThreads, 1U);
2245       WorkUnit =
2246         MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor),
2247              MinWorkUnit);
2248     }
2249     _collectionSetChooser->prepare_for_par_region_addition(_g1->n_regions(),
2250                                                            WorkUnit);
2251     ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
2252                                             (int) WorkUnit);
2253     _g1->workers()->run_task(&parKnownGarbageTask);
2254 
2255     assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
2256            "sanity check");
2257   } else {
2258     KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
2259     _g1->heap_region_iterate(&knownGarbagecl);
2260   }
2261 
2262   _collectionSetChooser->sort_regions();
2263 
2264   double end_sec = os::elapsedTime();
2265   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
2266   _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
2267   _cur_mark_stop_world_time_ms += elapsed_time_ms;
2268   _prev_collection_pause_end_ms += elapsed_time_ms;
2269   _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, true);
2270 }
2271 
2272 // Add the heap region at the head of the non-incremental collection set
2273 void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
2274   assert(_inc_cset_build_state == Active, "Precondition");
2275   assert(!hr->is_young(), "non-incremental add of young region");
2276 
2277   assert(!hr->in_collection_set(), "should not already be in the CSet");
2278   hr->set_in_collection_set(true);
2279   hr->set_next_in_collection_set(_collection_set);
2280   _collection_set = hr;
2281   _collection_set_bytes_used_before += hr->used();
2282   _g1->register_region_with_in_cset_fast_test(hr);
2283   size_t rs_length = hr->rem_set()->occupied();
2284   _recorded_rs_lengths += rs_length;
2285   _old_cset_region_length += 1;
2286 }
2287 
2288 // Initialize the per-collection-set information
2289 void G1CollectorPolicy::start_incremental_cset_building() {
2290   assert(_inc_cset_build_state == Inactive, "Precondition");
2291 
2292   _inc_cset_head = NULL;
2293   _inc_cset_tail = NULL;
2294   _inc_cset_bytes_used_before = 0;
2295 
2296   _inc_cset_max_finger = 0;
2297   _inc_cset_recorded_rs_lengths = 0;
2298   _inc_cset_recorded_rs_lengths_diffs = 0;
2299   _inc_cset_predicted_elapsed_time_ms = 0.0;
2300   _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
2301   _inc_cset_build_state = Active;
2302 }
2303 
2304 void G1CollectorPolicy::finalize_incremental_cset_building() {
2305   assert(_inc_cset_build_state == Active, "Precondition");
2306   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
2307 
2308   // The two "main" fields, _inc_cset_recorded_rs_lengths and
2309   // _inc_cset_predicted_elapsed_time_ms, are updated by the thread
2310   // that adds a new region to the CSet. Further updates by the
2311   // concurrent refinement thread that samples the young RSet lengths
2312   // are accumulated in the *_diffs fields. Here we add the diffs to
2313   // the "main" fields.
2314 
2315   if (_inc_cset_recorded_rs_lengths_diffs >= 0) {
2316     _inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs;
2317   } else {
2318     // This is defensive. The diff should in theory be always positive
2319     // as RSets can only grow between GCs. However, given that we
2320     // sample their size concurrently with other threads updating them
2321     // it's possible that we might get the wrong size back, which
2322     // could make the calculations somewhat inaccurate.
2323     size_t diffs = (size_t) (-_inc_cset_recorded_rs_lengths_diffs);
2324     if (_inc_cset_recorded_rs_lengths >= diffs) {
2325       _inc_cset_recorded_rs_lengths -= diffs;
2326     } else {
2327       _inc_cset_recorded_rs_lengths = 0;
2328     }
2329   }
2330   _inc_cset_predicted_elapsed_time_ms +=
2331                                      _inc_cset_predicted_elapsed_time_ms_diffs;
2332 
2333   _inc_cset_recorded_rs_lengths_diffs = 0;
2334   _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
2335 }
2336 
2337 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
2338   // This routine is used when:
2339   // * adding survivor regions to the incremental cset at the end of an
2340   //   evacuation pause,
2341   // * adding the current allocation region to the incremental cset
2342   //   when it is retired, and
2343   // * updating existing policy information for a region in the
2344   //   incremental cset via young list RSet sampling.
2345   // Therefore this routine may be called at a safepoint by the
2346   // VM thread, or in-between safepoints by mutator threads (when
2347   // retiring the current allocation region) or a concurrent
2348   // refine thread (RSet sampling).
2349 
2350   double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
2351   size_t used_bytes = hr->used();
2352   _inc_cset_recorded_rs_lengths += rs_length;
2353   _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
2354   _inc_cset_bytes_used_before += used_bytes;
2355 
2356   // Cache the values we have added to the aggregated informtion
2357   // in the heap region in case we have to remove this region from
2358   // the incremental collection set, or it is updated by the
2359   // rset sampling code
2360   hr->set_recorded_rs_length(rs_length);
2361   hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
2362 }
2363 
2364 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
2365                                                      size_t new_rs_length) {
2366   // Update the CSet information that is dependent on the new RS length
2367   assert(hr->is_young(), "Precondition");
2368   assert(!SafepointSynchronize::is_at_safepoint(),
2369                                                "should not be at a safepoint");
2370 
2371   // We could have updated _inc_cset_recorded_rs_lengths and
2372   // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do
2373   // that atomically, as this code is executed by a concurrent
2374   // refinement thread, potentially concurrently with a mutator thread
2375   // allocating a new region and also updating the same fields. To
2376   // avoid the atomic operations we accumulate these updates on two
2377   // separate fields (*_diffs) and we'll just add them to the "main"
2378   // fields at the start of a GC.
2379 
2380   ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
2381   ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
2382   _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;
2383 
2384   double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
2385   double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
2386   double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
2387   _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
2388 
2389   hr->set_recorded_rs_length(new_rs_length);
2390   hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
2391 }
2392 
2393 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
2394   assert(hr->is_young(), "invariant");
2395   assert(hr->young_index_in_cset() > -1, "should have already been set");
2396   assert(_inc_cset_build_state == Active, "Precondition");
2397 
2398   // We need to clear and set the cached recorded/cached collection set
2399   // information in the heap region here (before the region gets added
2400   // to the collection set). An individual heap region's cached values
2401   // are calculated, aggregated with the policy collection set info,
2402   // and cached in the heap region here (initially) and (subsequently)
2403   // by the Young List sampling code.
2404 
2405   size_t rs_length = hr->rem_set()->occupied();
2406   add_to_incremental_cset_info(hr, rs_length);
2407 
2408   HeapWord* hr_end = hr->end();
2409   _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);
2410 
2411   assert(!hr->in_collection_set(), "invariant");
2412   hr->set_in_collection_set(true);
2413   assert( hr->next_in_collection_set() == NULL, "invariant");
2414 
2415   _g1->register_region_with_in_cset_fast_test(hr);
2416 }
2417 
2418 // Add the region at the RHS of the incremental cset
2419 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
2420   // We should only ever be appending survivors at the end of a pause
2421   assert( hr->is_survivor(), "Logic");
2422 
2423   // Do the 'common' stuff
2424   add_region_to_incremental_cset_common(hr);
2425 
2426   // Now add the region at the right hand side
2427   if (_inc_cset_tail == NULL) {
2428     assert(_inc_cset_head == NULL, "invariant");
2429     _inc_cset_head = hr;
2430   } else {
2431     _inc_cset_tail->set_next_in_collection_set(hr);
2432   }
2433   _inc_cset_tail = hr;
2434 }
2435 
2436 // Add the region to the LHS of the incremental cset
2437 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
2438   // Survivors should be added to the RHS at the end of a pause
2439   assert(!hr->is_survivor(), "Logic");
2440 
2441   // Do the 'common' stuff
2442   add_region_to_incremental_cset_common(hr);
2443 
2444   // Add the region at the left hand side
2445   hr->set_next_in_collection_set(_inc_cset_head);
2446   if (_inc_cset_head == NULL) {
2447     assert(_inc_cset_tail == NULL, "Invariant");
2448     _inc_cset_tail = hr;
2449   }
2450   _inc_cset_head = hr;
2451 }
2452 
2453 #ifndef PRODUCT
2454 void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
2455   assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");
2456 
2457   st->print_cr("\nCollection_set:");
2458   HeapRegion* csr = list_head;
2459   while (csr != NULL) {
2460     HeapRegion* next = csr->next_in_collection_set();
2461     assert(csr->in_collection_set(), "bad CS");
2462     st->print_cr("  [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
2463                  "age: %4d, y: %d, surv: %d",
2464                         csr->bottom(), csr->end(),
2465                         csr->top(),
2466                         csr->prev_top_at_mark_start(),
2467                         csr->next_top_at_mark_start(),
2468                         csr->top_at_conc_mark_count(),
2469                         csr->age_in_surv_rate_group_cond(),
2470                         csr->is_young(),
2471                         csr->is_survivor());
2472     csr = next;
2473   }
2474 }
2475 #endif // !PRODUCT
2476 
2477 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
2478                                                 const char* false_action_str) {
2479   CollectionSetChooser* cset_chooser = _collectionSetChooser;
2480   if (cset_chooser->is_empty()) {
2481     ergo_verbose0(ErgoMixedGCs,
2482                   false_action_str,
2483                   ergo_format_reason("candidate old regions not available"));
2484     return false;
2485   }
2486   size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
2487   size_t capacity_bytes = _g1->capacity();
2488   double perc = (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
2489   double threshold = (double) G1HeapWastePercent;
2490   if (perc < threshold) {
2491     ergo_verbose4(ErgoMixedGCs,
2492               false_action_str,
2493               ergo_format_reason("reclaimable percentage lower than threshold")
2494               ergo_format_region("candidate old regions")
2495               ergo_format_byte_perc("reclaimable")
2496               ergo_format_perc("threshold"),
2497               cset_chooser->remaining_regions(),
2498               reclaimable_bytes, perc, threshold);
2499     return false;
2500   }
2501 
2502   ergo_verbose4(ErgoMixedGCs,
2503                 true_action_str,
2504                 ergo_format_reason("candidate old regions available")
2505                 ergo_format_region("candidate old regions")
2506                 ergo_format_byte_perc("reclaimable")
2507                 ergo_format_perc("threshold"),
2508                 cset_chooser->remaining_regions(),
2509                 reclaimable_bytes, perc, threshold);
2510   return true;
2511 }
2512 
2513 void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
2514   // Set this here - in case we're not doing young collections.
2515   double non_young_start_time_sec = os::elapsedTime();
2516 
2517   YoungList* young_list = _g1->young_list();
2518   finalize_incremental_cset_building();
2519 
2520   guarantee(target_pause_time_ms > 0.0,
2521             err_msg("target_pause_time_ms = %1.6lf should be positive",
2522                     target_pause_time_ms));
2523   guarantee(_collection_set == NULL, "Precondition");
2524 
2525   double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
2526   double predicted_pause_time_ms = base_time_ms;
2527   double time_remaining_ms = target_pause_time_ms - base_time_ms;
2528 
2529   ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
2530                 "start choosing CSet",
2531                 ergo_format_ms("predicted base time")
2532                 ergo_format_ms("remaining time")
2533                 ergo_format_ms("target pause time"),
2534                 base_time_ms, time_remaining_ms, target_pause_time_ms);
2535 
2536   HeapRegion* hr;
2537   double young_start_time_sec = os::elapsedTime();
2538 
2539   _collection_set_bytes_used_before = 0;
2540   _last_gc_was_young = gcs_are_young() ? true : false;
2541 
2542   if (_last_gc_was_young) {
2543     ++_young_pause_num;
2544   } else {
2545     ++_mixed_pause_num;
2546   }
2547 
2548   // The young list is laid with the survivor regions from the previous
2549   // pause are appended to the RHS of the young list, i.e.
2550   //   [Newly Young Regions ++ Survivors from last pause].
2551 
2552   uint survivor_region_length = young_list->survivor_length();
2553   uint eden_region_length = young_list->length() - survivor_region_length;
2554   init_cset_region_lengths(eden_region_length, survivor_region_length);
2555   hr = young_list->first_survivor_region();
2556   while (hr != NULL) {
2557     assert(hr->is_survivor(), "badly formed young list");
2558     hr->set_young();
2559     hr = hr->get_next_young_region();
2560   }
2561 
2562   // Clear the fields that point to the survivor list - they are all young now.
2563   young_list->clear_survivors();
2564 
2565   _collection_set = _inc_cset_head;
2566   _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
2567   time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
2568   predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;
2569 
2570   ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
2571                 "add young regions to CSet",
2572                 ergo_format_region("eden")
2573                 ergo_format_region("survivors")
2574                 ergo_format_ms("predicted young region time"),
2575                 eden_region_length, survivor_region_length,
2576                 _inc_cset_predicted_elapsed_time_ms);
2577 
2578   // The number of recorded young regions is the incremental
2579   // collection set's current size
2580   set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
2581 
2582   double young_end_time_sec = os::elapsedTime();
2583   _recorded_young_cset_choice_time_ms =
2584     (young_end_time_sec - young_start_time_sec) * 1000.0;
2585 
2586   // We are doing young collections so reset this.
2587   non_young_start_time_sec = young_end_time_sec;
2588 
2589   if (!gcs_are_young()) {
2590     CollectionSetChooser* cset_chooser = _collectionSetChooser;
2591     cset_chooser->verify();
2592     const uint min_old_cset_length = cset_chooser->calc_min_old_cset_length();
2593     const uint max_old_cset_length = cset_chooser->calc_max_old_cset_length();
2594 
2595     uint expensive_region_num = 0;
2596     bool check_time_remaining = adaptive_young_list_length();
2597     HeapRegion* hr = cset_chooser->peek();
2598     while (hr != NULL) {
2599       if (old_cset_region_length() >= max_old_cset_length) {
2600         // Added maximum number of old regions to the CSet.
2601         ergo_verbose2(ErgoCSetConstruction,
2602                       "finish adding old regions to CSet",
2603                       ergo_format_reason("old CSet region num reached max")
2604                       ergo_format_region("old")
2605                       ergo_format_region("max"),
2606                       old_cset_region_length(), max_old_cset_length);
2607         break;
2608       }
2609 
2610       double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
2611       if (check_time_remaining) {
2612         if (predicted_time_ms > time_remaining_ms) {
2613           // Too expensive for the current CSet.
2614 
2615           if (old_cset_region_length() >= min_old_cset_length) {
2616             // We have added the minimum number of old regions to the CSet,
2617             // we are done with this CSet.
2618             ergo_verbose4(ErgoCSetConstruction,
2619                           "finish adding old regions to CSet",
2620                           ergo_format_reason("predicted time is too high")
2621                           ergo_format_ms("predicted time")
2622                           ergo_format_ms("remaining time")
2623                           ergo_format_region("old")
2624                           ergo_format_region("min"),
2625                           predicted_time_ms, time_remaining_ms,
2626                           old_cset_region_length(), min_old_cset_length);
2627             break;
2628           }
2629 
2630           // We'll add it anyway given that we haven't reached the
2631           // minimum number of old regions.
2632           expensive_region_num += 1;
2633         }
2634       } else {
2635         if (old_cset_region_length() >= min_old_cset_length) {
2636           // In the non-auto-tuning case, we'll finish adding regions
2637           // to the CSet if we reach the minimum.
2638           ergo_verbose2(ErgoCSetConstruction,
2639                         "finish adding old regions to CSet",
2640                         ergo_format_reason("old CSet region num reached min")
2641                         ergo_format_region("old")
2642                         ergo_format_region("min"),
2643                         old_cset_region_length(), min_old_cset_length);
2644           break;
2645         }
2646       }
2647 
2648       // We will add this region to the CSet.
2649       time_remaining_ms -= predicted_time_ms;
2650       predicted_pause_time_ms += predicted_time_ms;
2651       cset_chooser->remove_and_move_to_next(hr);
2652       _g1->old_set_remove(hr);
2653       add_old_region_to_cset(hr);
2654 
2655       hr = cset_chooser->peek();
2656     }
2657     if (hr == NULL) {
2658       ergo_verbose0(ErgoCSetConstruction,
2659                     "finish adding old regions to CSet",
2660                     ergo_format_reason("candidate old regions not available"));
2661     }
2662 
2663     if (expensive_region_num > 0) {
2664       // We print the information once here at the end, predicated on
2665       // whether we added any apparently expensive regions or not, to
2666       // avoid generating output per region.
2667       ergo_verbose4(ErgoCSetConstruction,
2668                     "added expensive regions to CSet",
2669                     ergo_format_reason("old CSet region num not reached min")
2670                     ergo_format_region("old")
2671                     ergo_format_region("expensive")
2672                     ergo_format_region("min")
2673                     ergo_format_ms("remaining time"),
2674                     old_cset_region_length(),
2675                     expensive_region_num,
2676                     min_old_cset_length,
2677                     time_remaining_ms);
2678     }
2679 
2680     cset_chooser->verify();
2681   }
2682 
2683   stop_incremental_cset_building();
2684 
2685   count_CS_bytes_used();
2686 
2687   ergo_verbose5(ErgoCSetConstruction,
2688                 "finish choosing CSet",
2689                 ergo_format_region("eden")
2690                 ergo_format_region("survivors")
2691                 ergo_format_region("old")
2692                 ergo_format_ms("predicted pause time")
2693                 ergo_format_ms("target pause time"),
2694                 eden_region_length, survivor_region_length,
2695                 old_cset_region_length(),
2696                 predicted_pause_time_ms, target_pause_time_ms);
2697 
2698   double non_young_end_time_sec = os::elapsedTime();
2699   _recorded_non_young_cset_choice_time_ms =
2700     (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
2701 }