1 /*
   2  * Copyright (c) 2004, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/adaptiveSizePolicy.hpp"
  27 #include "gc/shared/collectorPolicy.hpp"
  28 #include "gc/shared/gcCause.hpp"
  29 #include "gc/shared/gcUtil.inline.hpp"
  30 #include "gc/shared/workgroup.hpp"
  31 #include "logging/log.hpp"
  32 #include "runtime/timer.hpp"
  33 #include "utilities/ostream.hpp"
// Timers shared by all policy instances.  _minor_timer alternately measures
// mutator intervals and minor pauses (see minor_collection_begin()/end());
// _major_timer accumulates the time since the last major collection and is
// read via time_since_major_gc().
elapsedTimer AdaptiveSizePolicy::_minor_timer;
elapsedTimer AdaptiveSizePolicy::_major_timer;
// Only used when ForceDynamicNumberOfGCThreads is set, to alternate the
// debug perturbation of the active worker count in
// calc_default_active_workers().
bool AdaptiveSizePolicy::_debug_perturbation = false;
  37 
  38 // The throughput goal is implemented as
  39 //      _throughput_goal = 1 - ( 1 / (1 + gc_cost_ratio))
  40 // gc_cost_ratio is the ratio
  41 //      application cost / gc cost
  42 // For example a gc_cost_ratio of 4 translates into a
  43 // throughput goal of .80
  44 
// Construct the policy from the initial generation sizes and the
// pause/throughput goals.  The throughput goal is derived from
// gc_cost_ratio as 1 - 1/(1 + gc_cost_ratio) (see the comment above).
AdaptiveSizePolicy::AdaptiveSizePolicy(size_t init_eden_size,
                                       size_t init_promo_size,
                                       size_t init_survivor_size,
                                       double gc_pause_goal_sec,
                                       uint gc_cost_ratio) :
    _eden_size(init_eden_size),
    _promo_size(init_promo_size),
    _survivor_size(init_survivor_size),
    _gc_pause_goal_sec(gc_pause_goal_sec),
    _throughput_goal(1.0 - double(1.0 / (1.0 + (double) gc_cost_ratio))),
    _gc_overhead_limit_exceeded(false),
    _print_gc_overhead_limit_would_be_exceeded(false),
    _gc_overhead_limit_count(0),
    _latest_minor_mutator_interval_seconds(0),
    _threshold_tolerance_percent(1.0 + ThresholdTolerance/100.0),
    _young_gen_change_for_minor_throughput(0),
    _old_gen_change_for_major_throughput(0) {
  // A zero threshold would allow the overhead limit to trip on the first
  // violation, before SoftReferences could be cleared (see
  // check_gc_overhead_limit()).
  assert(AdaptiveSizePolicyGCTimeLimitThreshold > 0,
    "No opportunity to clear SoftReferences before GC overhead limit");

  // Pause/interval/cost statistics, weighted by AdaptiveTimeWeight.
  _avg_minor_pause    =
    new AdaptivePaddedAverage(AdaptiveTimeWeight, PausePadding);
  _avg_minor_interval = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
  _avg_minor_gc_cost  = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
  _avg_major_gc_cost  = new AdaptiveWeightedAverage(AdaptiveTimeWeight);

  // Live-size averages, weighted by AdaptiveSizePolicyWeight.
  _avg_young_live     = new AdaptiveWeightedAverage(AdaptiveSizePolicyWeight);
  _avg_old_live       = new AdaptiveWeightedAverage(AdaptiveSizePolicyWeight);
  _avg_eden_live      = new AdaptiveWeightedAverage(AdaptiveSizePolicyWeight);

  // Padded averages used for survivor-space sizing.
  _avg_survived       = new AdaptivePaddedAverage(AdaptiveSizePolicyWeight,
                                                  SurvivorPadding);
  _avg_pretenured     = new AdaptivePaddedNoZeroDevAverage(
                                                  AdaptiveSizePolicyWeight,
                                                  SurvivorPadding);

  // Least-squares estimators of pause time and collection cost as
  // functions of generation size (updated in minor_collection_end() and
  // the update_*_estimator() helpers).
  _minor_pause_old_estimator =
    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
  _minor_pause_young_estimator =
    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
  _minor_collection_estimator =
    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
  _major_collection_estimator =
    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);

  // Start the timers
  _minor_timer.start();

  // Not ready until enough minor collections have been sampled
  // (see minor_collection_end()).
  _young_gen_policy_is_ready = false;
}
  94 
  95 //  If the number of GC threads was set on the command line,
  96 // use it.
  97 //  Else
  98 //    Calculate the number of GC threads based on the number of Java threads.
  99 //    Calculate the number of GC threads based on the size of the heap.
 100 //    Use the larger.
 101 
 102 uint AdaptiveSizePolicy::calc_default_active_workers(uintx total_workers,
 103                                                      const uintx min_workers,
 104                                                      uintx active_workers,
 105                                                      uintx application_workers) {
 106   // If the user has specifically set the number of
 107   // GC threads, use them.
 108 
 109   // If the user has turned off using a dynamic number of GC threads
 110   // or the users has requested a specific number, set the active
 111   // number of workers to all the workers.
 112 
 113   uintx new_active_workers = total_workers;
 114   uintx prev_active_workers = active_workers;
 115   uintx active_workers_by_JT = 0;
 116   uintx active_workers_by_heap_size = 0;
 117 
 118   // Always use at least min_workers but use up to
 119   // GCThreadsPerJavaThreads * application threads.
 120   active_workers_by_JT =
 121     MAX2((uintx) GCWorkersPerJavaThread * application_workers,
 122          min_workers);
 123 
 124   // Choose a number of GC threads based on the current size
 125   // of the heap.  This may be complicated because the size of
 126   // the heap depends on factors such as the throughput goal.
 127   // Still a large heap should be collected by more GC threads.
 128   active_workers_by_heap_size =
 129       MAX2((size_t) 2U, Universe::heap()->capacity() / HeapSizePerGCThread);
 130 
 131   uintx max_active_workers =
 132     MAX2(active_workers_by_JT, active_workers_by_heap_size);
 133 
 134   new_active_workers = MIN2(max_active_workers, (uintx) total_workers);
 135 
 136   // Increase GC workers instantly but decrease them more
 137   // slowly.
 138   if (new_active_workers < prev_active_workers) {
 139     new_active_workers =
 140       MAX2(min_workers, (prev_active_workers + new_active_workers) / 2);
 141   }
 142 
 143   // Check once more that the number of workers is within the limits.
 144   assert(min_workers <= total_workers, "Minimum workers not consistent with total workers");
 145   assert(new_active_workers >= min_workers, "Minimum workers not observed");
 146   assert(new_active_workers <= total_workers, "Total workers not observed");
 147 
 148   if (ForceDynamicNumberOfGCThreads) {
 149     // Assume this is debugging and jiggle the number of GC threads.
 150     if (new_active_workers == prev_active_workers) {
 151       if (new_active_workers < total_workers) {
 152         new_active_workers++;
 153       } else if (new_active_workers > min_workers) {
 154         new_active_workers--;
 155       }
 156     }
 157     if (new_active_workers == total_workers) {
 158       if (_debug_perturbation) {
 159         new_active_workers =  min_workers;
 160       }
 161       _debug_perturbation = !_debug_perturbation;
 162     }
 163     assert((new_active_workers <= ParallelGCThreads) &&
 164            (new_active_workers >= min_workers),
 165       "Jiggled active workers too much");
 166   }
 167 
 168   log_trace(gc, task)("GCTaskManager::calc_default_active_workers() : "
 169      "active_workers(): " UINTX_FORMAT "  new_active_workers: " UINTX_FORMAT "  "
 170      "prev_active_workers: " UINTX_FORMAT "\n"
 171      " active_workers_by_JT: " UINTX_FORMAT "  active_workers_by_heap_size: " UINTX_FORMAT,
 172      active_workers, new_active_workers, prev_active_workers,
 173      active_workers_by_JT, active_workers_by_heap_size);
 174   assert(new_active_workers > 0, "Always need at least 1");
 175   return new_active_workers;
 176 }
 177 
 178 uint AdaptiveSizePolicy::calc_active_workers(uintx total_workers,
 179                                              uintx active_workers,
 180                                              uintx application_workers) {
 181   // If the user has specifically set the number of
 182   // GC threads, use them.
 183 
 184   // If the user has turned off using a dynamic number of GC threads
 185   // or the users has requested a specific number, set the active
 186   // number of workers to all the workers.
 187 
 188   uint new_active_workers;
 189   if (!UseDynamicNumberOfGCThreads ||
 190      (!FLAG_IS_DEFAULT(ParallelGCThreads) && !ForceDynamicNumberOfGCThreads)) {
 191     new_active_workers = total_workers;
 192   } else {
 193     uintx min_workers = (total_workers == 1) ? 1 : 2;
 194     new_active_workers = calc_default_active_workers(total_workers,
 195                                                      min_workers,
 196                                                      active_workers,
 197                                                      application_workers);
 198   }
 199   assert(new_active_workers > 0, "Always need at least 1");
 200   return new_active_workers;
 201 }
 202 
 203 uint AdaptiveSizePolicy::calc_active_conc_workers(uintx total_workers,
 204                                                   uintx active_workers,
 205                                                   uintx application_workers) {
 206   if (!UseDynamicNumberOfGCThreads ||
 207      (!FLAG_IS_DEFAULT(ConcGCThreads) && !ForceDynamicNumberOfGCThreads)) {
 208     return ConcGCThreads;
 209   } else {
 210     uint no_of_gc_threads = calc_default_active_workers(total_workers,
 211                                                         1, /* Minimum number of workers */
 212                                                         active_workers,
 213                                                         application_workers);
 214     return no_of_gc_threads;
 215   }
 216 }
 217 
 218 bool AdaptiveSizePolicy::tenuring_threshold_change() const {
 219   return decrement_tenuring_threshold_for_gc_cost() ||
 220          increment_tenuring_threshold_for_gc_cost() ||
 221          decrement_tenuring_threshold_for_survivor_limit();
 222 }
 223 
// Called at the start of a minor collection.  _minor_timer has been
// running since the end of the previous minor collection (see
// minor_collection_end()), so it currently measures the mutator
// interval.
void AdaptiveSizePolicy::minor_collection_begin() {
  // Update the interval time
  _minor_timer.stop();
  // Save the most recent mutator interval.  (The timer was restarted at
  // the end of the previous minor collection, so this is mutator time,
  // not collection time.)
  _latest_minor_mutator_interval_seconds = _minor_timer.seconds();
  // Restart the timer; until minor_collection_end() it now measures the
  // collection pause.
  _minor_timer.reset();
  _minor_timer.start();
}
 232 
 233 void AdaptiveSizePolicy::update_minor_pause_young_estimator(
 234     double minor_pause_in_ms) {
 235   double eden_size_in_mbytes = ((double)_eden_size)/((double)M);
 236   _minor_pause_young_estimator->update(eden_size_in_mbytes,
 237     minor_pause_in_ms);
 238 }
 239 
// Called at the end of a minor collection.  _minor_timer has been
// running since minor_collection_begin(), so it now measures the pause.
// Updates the pause/interval/cost averages and the least-squares
// estimators, unless this was a user-requested GC and
// UseAdaptiveSizePolicyWithSystemGC is off.  Always restarts the timer
// so that it measures the following mutator interval.
void AdaptiveSizePolicy::minor_collection_end(GCCause::Cause gc_cause) {
  // Update the pause time.
  _minor_timer.stop();

  if (!GCCause::is_user_requested_gc(gc_cause) ||
      UseAdaptiveSizePolicyWithSystemGC) {
    double minor_pause_in_seconds = _minor_timer.seconds();
    double minor_pause_in_ms = minor_pause_in_seconds * MILLIUNITS;

    // Sample for performance counter
    _avg_minor_pause->sample(minor_pause_in_seconds);

    // Cost of collection (unit-less)
    // cost = pause / (mutator interval + pause); only computed once a
    // mutator interval has been recorded and the pause is non-zero.
    double collection_cost = 0.0;
    if ((_latest_minor_mutator_interval_seconds > 0.0) &&
        (minor_pause_in_seconds > 0.0)) {
      double interval_in_seconds =
        _latest_minor_mutator_interval_seconds + minor_pause_in_seconds;
      collection_cost =
        minor_pause_in_seconds / interval_in_seconds;
      _avg_minor_gc_cost->sample(collection_cost);
      // Sample for performance counter
      _avg_minor_interval->sample(interval_in_seconds);
    }

    // The policy does not have enough data until at least some
    // young collections have been done.
    _young_gen_policy_is_ready =
      (_avg_minor_gc_cost->count() >= AdaptiveSizePolicyReadyThreshold);

    // Calculate variables used to estimate pause time vs. gen sizes
    double eden_size_in_mbytes = ((double)_eden_size) / ((double)M);
    update_minor_pause_young_estimator(minor_pause_in_ms);
    update_minor_pause_old_estimator(minor_pause_in_ms);

    log_trace(gc, ergo)("AdaptiveSizePolicy::minor_collection_end: minor gc cost: %f  average: %f",
                        collection_cost, _avg_minor_gc_cost->average());
    log_trace(gc, ergo)("  minor pause: %f minor period %f",
                        minor_pause_in_ms, _latest_minor_mutator_interval_seconds * MILLIUNITS);

    // Calculate variable used to estimate collection cost vs. gen sizes
    assert(collection_cost >= 0.0, "Expected to be non-negative");
    _minor_collection_estimator->update(eden_size_in_mbytes, collection_cost);
  }

  // Interval times use this timer to measure the mutator time.
  // Reset the timer after the GC pause.
  _minor_timer.reset();
  _minor_timer.start();
}
 290 
 291 size_t AdaptiveSizePolicy::eden_increment(size_t cur_eden, uint percent_change) {
 292   size_t eden_heap_delta;
 293   eden_heap_delta = cur_eden / 100 * percent_change;
 294   return eden_heap_delta;
 295 }
 296 
 297 size_t AdaptiveSizePolicy::eden_increment(size_t cur_eden) {
 298   return eden_increment(cur_eden, YoungGenerationSizeIncrement);
 299 }
 300 
 301 size_t AdaptiveSizePolicy::eden_decrement(size_t cur_eden) {
 302   size_t eden_heap_delta = eden_increment(cur_eden) /
 303     AdaptiveSizeDecrementScaleFactor;
 304   return eden_heap_delta;
 305 }
 306 
 307 size_t AdaptiveSizePolicy::promo_increment(size_t cur_promo, uint percent_change) {
 308   size_t promo_heap_delta;
 309   promo_heap_delta = cur_promo / 100 * percent_change;
 310   return promo_heap_delta;
 311 }
 312 
 313 size_t AdaptiveSizePolicy::promo_increment(size_t cur_promo) {
 314   return promo_increment(cur_promo, TenuredGenerationSizeIncrement);
 315 }
 316 
 317 size_t AdaptiveSizePolicy::promo_decrement(size_t cur_promo) {
 318   size_t promo_heap_delta = promo_increment(cur_promo);
 319   promo_heap_delta = promo_heap_delta / AdaptiveSizeDecrementScaleFactor;
 320   return promo_heap_delta;
 321 }
 322 
// Elapsed seconds since the last major collection, as accumulated by
// the shared _major_timer.  The timer is stopped around the read and
// restarted so it keeps accumulating; note that this mutates the static
// timer even though the method is const.
double AdaptiveSizePolicy::time_since_major_gc() const {
  _major_timer.stop();
  double result = _major_timer.seconds();
  _major_timer.start();
  return result;
}
 329 
 330 // Linear decay of major gc cost
 331 double AdaptiveSizePolicy::decaying_major_gc_cost() const {
 332   double major_interval = major_gc_interval_average_for_decay();
 333   double major_gc_cost_average = major_gc_cost();
 334   double decayed_major_gc_cost = major_gc_cost_average;
 335   if(time_since_major_gc() > 0.0) {
 336     decayed_major_gc_cost = major_gc_cost() *
 337       (((double) AdaptiveSizeMajorGCDecayTimeScale) * major_interval)
 338       / time_since_major_gc();
 339   }
 340 
 341   // The decayed cost should always be smaller than the
 342   // average cost but the vagaries of finite arithmetic could
 343   // produce a larger value in decayed_major_gc_cost so protect
 344   // against that.
 345   return MIN2(major_gc_cost_average, decayed_major_gc_cost);
 346 }
 347 
 348 // Use a value of the major gc cost that has been decayed
 349 // by the factor
 350 //
 351 //      average-interval-between-major-gc * AdaptiveSizeMajorGCDecayTimeScale /
 352 //        time-since-last-major-gc
 353 //
 354 // if the average-interval-between-major-gc * AdaptiveSizeMajorGCDecayTimeScale
 355 // is less than time-since-last-major-gc.
 356 //
 357 // In cases where there are initial major gc's that
 358 // are of a relatively high cost but no later major
 359 // gc's, the total gc cost can remain high because
 360 // the major gc cost remains unchanged (since there are no major
 361 // gc's).  In such a situation the value of the unchanging
 362 // major gc cost can keep the mutator throughput below
 363 // the goal when in fact the major gc cost is becoming diminishingly
// small.  Use the decaying gc cost only to decide whether to
// adjust for throughput.  Using it also to determine the size of the
// adjustment seems reasonable as well, but there is no test case
// available to decide whether that is the right thing to do, so
// don't do it yet.
 369 
 370 double AdaptiveSizePolicy::decaying_gc_cost() const {
 371   double decayed_major_gc_cost = major_gc_cost();
 372   double avg_major_interval = major_gc_interval_average_for_decay();
 373   if (UseAdaptiveSizeDecayMajorGCCost &&
 374       (AdaptiveSizeMajorGCDecayTimeScale > 0) &&
 375       (avg_major_interval > 0.00)) {
 376     double time_since_last_major_gc = time_since_major_gc();
 377 
 378     // Decay the major gc cost?
 379     if (time_since_last_major_gc >
 380         ((double) AdaptiveSizeMajorGCDecayTimeScale) * avg_major_interval) {
 381 
 382       // Decay using the time-since-last-major-gc
 383       decayed_major_gc_cost = decaying_major_gc_cost();
 384       log_trace(gc, ergo)("decaying_gc_cost: major interval average: %f  time since last major gc: %f",
 385                     avg_major_interval, time_since_last_major_gc);
 386       log_trace(gc, ergo)("  major gc cost: %f  decayed major gc cost: %f",
 387                     major_gc_cost(), decayed_major_gc_cost);
 388     }
 389   }
 390   double result = MIN2(1.0, decayed_major_gc_cost + minor_gc_cost());
 391   return result;
 392 }
 393 
 394 
 395 void AdaptiveSizePolicy::clear_generation_free_space_flags() {
 396   set_change_young_gen_for_min_pauses(0);
 397   set_change_old_gen_for_maj_pauses(0);
 398 
 399   set_change_old_gen_for_throughput(0);
 400   set_change_young_gen_for_throughput(0);
 401   set_decrease_for_footprint(0);
 402   set_decide_at_full_gc(0);
 403 }
 404 
// Decide whether the GC overhead limit is being exceeded: GC cost is
// high (gc_cost() > GCTimeLimit/100) while the space reclaimed is low
// (free space in both the old gen and eden under GCHeapFreeLimit
// percent).  After AdaptiveSizePolicyGCTimeLimitThreshold consecutive
// violations this sets _gc_overhead_limit_exceeded, which the heap can
// query when deciding whether to throw OutOfMemoryError; when nearing
// the limit it requests clearing of all SoftReferences via
// collector_policy.  Only full GCs participate in the counting;
// explicit (user/serviceability) GCs are ignored entirely.
void AdaptiveSizePolicy::check_gc_overhead_limit(
                                          size_t young_live,
                                          size_t eden_live,
                                          size_t max_old_gen_size,
                                          size_t max_eden_size,
                                          bool   is_full_gc,
                                          GCCause::Cause gc_cause,
                                          CollectorPolicy* collector_policy) {

  // Ignore explicit GC's.  Exiting here does not set the flag and
  // does not reset the count.  Updating of the averages for system
  // GC's is still controlled by UseAdaptiveSizePolicyWithSystemGC.
  if (GCCause::is_user_requested_gc(gc_cause) ||
      GCCause::is_serviceability_requested_gc(gc_cause)) {
    return;
  }
  // eden_limit is the upper limit on the size of eden based on
  // the maximum size of the young generation and the sizes
  // of the survivor space.
  // The question being asked is whether the gc costs are high
  // and the space being recovered by a collection is low.
  // free_in_young_gen is the free space in the young generation
  // after a collection and promo_live is the free space in the old
  // generation after a collection.
  //
  // Use the minimum of the current value of the live in the
  // young gen or the average of the live in the young gen.
  // If the current value drops quickly, that should be taken
  // into account (i.e., don't trigger if the amount of free
  // space has suddenly jumped up).  If the current is much
  // higher than the average, use the average since it represents
  // the longer term behavior.
  const size_t live_in_eden =
    MIN2(eden_live, (size_t) avg_eden_live()->average());
  const size_t free_in_eden = max_eden_size > live_in_eden ?
    max_eden_size - live_in_eden : 0;
  // NOTE(review): this assumes avg_old_live()->average() never exceeds
  // max_old_gen_size; a larger average would make the subtraction
  // negative before the size_t cast — confirm the upstream invariant.
  const size_t free_in_old_gen = (size_t)(max_old_gen_size - avg_old_live()->average());
  const size_t total_free_limit = free_in_old_gen + free_in_eden;
  const size_t total_mem = max_old_gen_size + max_eden_size;
  const double mem_free_limit = total_mem * (GCHeapFreeLimit/100.0);
  const double mem_free_old_limit = max_old_gen_size * (GCHeapFreeLimit/100.0);
  const double mem_free_eden_limit = max_eden_size * (GCHeapFreeLimit/100.0);
  const double gc_cost_limit = GCTimeLimit/100.0;
  size_t promo_limit = (size_t)(max_old_gen_size - avg_old_live()->average());
  // But don't force a promo size below the current promo size. Otherwise,
  // the promo size will shrink for no good reason.
  promo_limit = MAX2(promo_limit, _promo_size);


  // (The message keeps the historical "PSAdaptiveSizePolicy" prefix.)
  log_trace(gc, ergo)(
        "PSAdaptiveSizePolicy::check_gc_overhead_limit:"
        " promo_limit: " SIZE_FORMAT
        " max_eden_size: " SIZE_FORMAT
        " total_free_limit: " SIZE_FORMAT
        " max_old_gen_size: " SIZE_FORMAT
        " max_eden_size: " SIZE_FORMAT
        " mem_free_limit: " SIZE_FORMAT,
        promo_limit, max_eden_size, total_free_limit,
        max_old_gen_size, max_eden_size,
        (size_t) mem_free_limit);

  bool print_gc_overhead_limit_would_be_exceeded = false;
  if (is_full_gc) {
    if (gc_cost() > gc_cost_limit &&
      free_in_old_gen < (size_t) mem_free_old_limit &&
      free_in_eden < (size_t) mem_free_eden_limit) {
      // Collections, on average, are taking too much time, and
      //      gc_cost() > gc_cost_limit
      // we have too little space available after a full gc.
      //      total_free_limit < mem_free_limit
      // where
      //   total_free_limit is the free space available in
      //     both generations
      //   total_mem is the total space available for allocation
      //     in both generations (survivor spaces are not included
      //     just as they are not included in eden_limit).
      //   mem_free_limit is a fraction of total_mem judged to be an
      //     acceptable amount that is still unused.
      // The heap can ask for the value of this variable when deciding
      // whether to thrown an OutOfMemory error.
      // Note that the gc time limit test only works for the collections
      // of the young gen + tenured gen and not for collections of the
      // permanent gen.  That is because the calculation of the space
      // freed by the collection is the free space in the young gen +
      // tenured gen.
      // At this point the GC overhead limit is being exceeded.
      inc_gc_overhead_limit_count();
      if (UseGCOverheadLimit) {
        if (gc_overhead_limit_count() >=
            AdaptiveSizePolicyGCTimeLimitThreshold){
          // All conditions have been met for throwing an out-of-memory
          set_gc_overhead_limit_exceeded(true);
          // Avoid consecutive OOM due to the gc time limit by resetting
          // the counter.
          reset_gc_overhead_limit_count();
        } else {
          // The required consecutive collections which exceed the
          // GC time limit may or may not have been reached. We
          // are approaching that condition and so as not to
          // throw an out-of-memory before all SoftRef's have been
          // cleared, set _should_clear_all_soft_refs in CollectorPolicy.
          // The clearing will be done on the next GC.
          bool near_limit = gc_overhead_limit_near();
          if (near_limit) {
            collector_policy->set_should_clear_all_soft_refs(true);
            log_trace(gc, ergo)("Nearing GC overhead limit, will be clearing all SoftReference");
          }
        }
      }
      // Set this even when the overhead limit will not
      // cause an out-of-memory.  Diagnostic message indicating
      // that the overhead limit is being exceeded is sometimes
      // printed.
      print_gc_overhead_limit_would_be_exceeded = true;

    } else {
      // Did not exceed overhead limits
      reset_gc_overhead_limit_count();
    }
  }

  if (UseGCOverheadLimit) {
    if (gc_overhead_limit_exceeded()) {
      log_trace(gc, ergo)("GC is exceeding overhead limit of " UINTX_FORMAT "%%", GCTimeLimit);
      reset_gc_overhead_limit_count();
    } else if (print_gc_overhead_limit_would_be_exceeded) {
      assert(gc_overhead_limit_count() > 0, "Should not be printing");
      log_trace(gc, ergo)("GC would exceed overhead limit of " UINTX_FORMAT "%% %d consecutive time(s)",
                          GCTimeLimit, gc_overhead_limit_count());
    }
  }
}
 537 // Printing
 538 
 539 bool AdaptiveSizePolicy::print() const {
 540   assert(UseAdaptiveSizePolicy, "UseAdaptiveSizePolicy need to be enabled.");
 541 
 542   if (!log_is_enabled(Debug, gc, ergo)) {
 543     return false;
 544   }
 545 
 546   // Print goal for which action is needed.
 547   char* action = NULL;
 548   bool change_for_pause = false;
 549   if ((change_old_gen_for_maj_pauses() ==
 550          decrease_old_gen_for_maj_pauses_true) ||
 551       (change_young_gen_for_min_pauses() ==
 552          decrease_young_gen_for_min_pauses_true)) {
 553     action = (char*) " *** pause time goal ***";
 554     change_for_pause = true;
 555   } else if ((change_old_gen_for_throughput() ==
 556                increase_old_gen_for_throughput_true) ||
 557             (change_young_gen_for_throughput() ==
 558                increase_young_gen_for_througput_true)) {
 559     action = (char*) " *** throughput goal ***";
 560   } else if (decrease_for_footprint()) {
 561     action = (char*) " *** reduced footprint ***";
 562   } else {
 563     // No actions were taken.  This can legitimately be the
 564     // situation if not enough data has been gathered to make
 565     // decisions.
 566     return false;
 567   }
 568 
 569   // Pauses
 570   // Currently the size of the old gen is only adjusted to
 571   // change the major pause times.
 572   char* young_gen_action = NULL;
 573   char* tenured_gen_action = NULL;
 574 
 575   char* shrink_msg = (char*) "(attempted to shrink)";
 576   char* grow_msg = (char*) "(attempted to grow)";
 577   char* no_change_msg = (char*) "(no change)";
 578   if (change_young_gen_for_min_pauses() ==
 579       decrease_young_gen_for_min_pauses_true) {
 580     young_gen_action = shrink_msg;
 581   } else if (change_for_pause) {
 582     young_gen_action = no_change_msg;
 583   }
 584 
 585   if (change_old_gen_for_maj_pauses() == decrease_old_gen_for_maj_pauses_true) {
 586     tenured_gen_action = shrink_msg;
 587   } else if (change_for_pause) {
 588     tenured_gen_action = no_change_msg;
 589   }
 590 
 591   // Throughput
 592   if (change_old_gen_for_throughput() == increase_old_gen_for_throughput_true) {
 593     assert(change_young_gen_for_throughput() ==
 594            increase_young_gen_for_througput_true,
 595            "Both generations should be growing");
 596     young_gen_action = grow_msg;
 597     tenured_gen_action = grow_msg;
 598   } else if (change_young_gen_for_throughput() ==
 599              increase_young_gen_for_througput_true) {
 600     // Only the young generation may grow at start up (before
 601     // enough full collections have been done to grow the old generation).
 602     young_gen_action = grow_msg;
 603     tenured_gen_action = no_change_msg;
 604   }
 605 
 606   // Minimum footprint
 607   if (decrease_for_footprint() != 0) {
 608     young_gen_action = shrink_msg;
 609     tenured_gen_action = shrink_msg;
 610   }
 611 
 612   log_debug(gc, ergo)("UseAdaptiveSizePolicy actions to meet %s", action);
 613   log_debug(gc, ergo)("                       GC overhead (%%)");
 614   log_debug(gc, ergo)("    Young generation:     %7.2f\t  %s",
 615                       100.0 * avg_minor_gc_cost()->average(), young_gen_action);
 616   log_debug(gc, ergo)("    Tenured generation:   %7.2f\t  %s",
 617                       100.0 * avg_major_gc_cost()->average(), tenured_gen_action);
 618   return true;
 619 }
 620 
 621 void AdaptiveSizePolicy::print_tenuring_threshold( uint new_tenuring_threshold_arg) const {
 622   // Tenuring threshold
 623   if (decrement_tenuring_threshold_for_survivor_limit()) {
 624     log_debug(gc, ergo)("Tenuring threshold: (attempted to decrease to avoid survivor space overflow) = %u", new_tenuring_threshold_arg);
 625   } else if (decrement_tenuring_threshold_for_gc_cost()) {
 626     log_debug(gc, ergo)("Tenuring threshold: (attempted to decrease to balance GC costs) = %u", new_tenuring_threshold_arg);
 627   } else if (increment_tenuring_threshold_for_gc_cost()) {
 628     log_debug(gc, ergo)("Tenuring threshold: (attempted to increase to balance GC costs) = %u", new_tenuring_threshold_arg);
 629   } else {
 630     assert(!tenuring_threshold_change(), "(no change was attempted)");
 631   }
 632 }