1 /*
   2  * Copyright (c) 2004, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/adaptiveSizePolicy.hpp"
  27 #include "gc/shared/collectorPolicy.hpp"
  28 #include "gc/shared/gcCause.hpp"
  29 #include "gc/shared/workgroup.hpp"
  30 #include "logging/log.hpp"
  31 #include "runtime/timer.hpp"
  32 #include "utilities/ostream.hpp"
// Static timers shared by all AdaptiveSizePolicy instances:
// _minor_timer measures minor-GC pauses and mutator intervals,
// _major_timer measures the time since the last major GC.
elapsedTimer AdaptiveSizePolicy::_minor_timer;
elapsedTimer AdaptiveSizePolicy::_major_timer;
// Used only under ForceDynamicNumberOfGCThreads to alternate the active
// worker count for debugging (see calc_default_active_workers()).
bool AdaptiveSizePolicy::_debug_perturbation = false;
  36 
  37 // The throughput goal is implemented as
  38 //      _throughput_goal = 1 - ( 1 / (1 + gc_cost_ratio))
  39 // gc_cost_ratio is the ratio
  40 //      application cost / gc cost
  41 // For example a gc_cost_ratio of 4 translates into a
  42 // throughput goal of .80
  43 
AdaptiveSizePolicy::AdaptiveSizePolicy(size_t init_eden_size,
                                       size_t init_promo_size,
                                       size_t init_survivor_size,
                                       double gc_pause_goal_sec,
                                       uint gc_cost_ratio) :
    _eden_size(init_eden_size),
    _promo_size(init_promo_size),
    _survivor_size(init_survivor_size),
    _gc_pause_goal_sec(gc_pause_goal_sec),
    // See the comment above: goal = 1 - 1/(1 + gc_cost_ratio).
    _throughput_goal(1.0 - double(1.0 / (1.0 + (double) gc_cost_ratio))),
    _gc_overhead_limit_exceeded(false),
    _print_gc_overhead_limit_would_be_exceeded(false),
    _gc_overhead_limit_count(0),
    _latest_minor_mutator_interval_seconds(0),
    _threshold_tolerance_percent(1.0 + ThresholdTolerance/100.0),
    _young_gen_change_for_minor_throughput(0),
    _old_gen_change_for_major_throughput(0) {
  // A threshold of 0 would mean the overhead limit could trip before
  // gc_overhead_limit_near() ever requested SoftReference clearing.
  assert(AdaptiveSizePolicyGCTimeLimitThreshold > 0,
    "No opportunity to clear SoftReferences before GC overhead limit");
  // Time-weighted averages of pause times, intervals and GC cost.
  _avg_minor_pause    =
    new AdaptivePaddedAverage(AdaptiveTimeWeight, PausePadding);
  _avg_minor_interval = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
  _avg_minor_gc_cost  = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
  _avg_major_gc_cost  = new AdaptiveWeightedAverage(AdaptiveTimeWeight);

  // Size-weighted averages of live data in the generations.
  _avg_young_live     = new AdaptiveWeightedAverage(AdaptiveSizePolicyWeight);
  _avg_old_live       = new AdaptiveWeightedAverage(AdaptiveSizePolicyWeight);
  _avg_eden_live      = new AdaptiveWeightedAverage(AdaptiveSizePolicyWeight);

  // Padded averages used for survivor-space sizing.
  _avg_survived       = new AdaptivePaddedAverage(AdaptiveSizePolicyWeight,
                                                  SurvivorPadding);
  _avg_pretenured     = new AdaptivePaddedNoZeroDevAverage(
                                                  AdaptiveSizePolicyWeight,
                                                  SurvivorPadding);

  // Least-squares estimators relating generation sizes to pause times
  // and collection costs.
  _minor_pause_old_estimator =
    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
  _minor_pause_young_estimator =
    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
  _minor_collection_estimator =
    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
  _major_collection_estimator =
    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);

  // Start the timers
  _minor_timer.start();

  // Becomes true once enough minor collections have been sampled
  // (set in minor_collection_end()).
  _young_gen_policy_is_ready = false;
}
  93 
  94 //  If the number of GC threads was set on the command line,
  95 // use it.
  96 //  Else
  97 //    Calculate the number of GC threads based on the number of Java threads.
  98 //    Calculate the number of GC threads based on the size of the heap.
  99 //    Use the larger.
 100 
uint AdaptiveSizePolicy::calc_default_active_workers(uintx total_workers,
                                                     const uintx min_workers,
                                                     uintx active_workers,
                                                     uintx application_workers) {
  // If the user has specifically set the number of
  // GC threads, use them.

  // If the user has turned off using a dynamic number of GC threads
  // or the users has requested a specific number, set the active
  // number of workers to all the workers.

  uintx new_active_workers = total_workers;
  uintx prev_active_workers = active_workers;
  uintx active_workers_by_JT = 0;
  uintx active_workers_by_heap_size = 0;

  // Always use at least min_workers but use up to
  // GCThreadsPerJavaThreads * application threads.
  active_workers_by_JT =
    MAX2((uintx) GCWorkersPerJavaThread * application_workers,
         min_workers);

  // Choose a number of GC threads based on the current size
  // of the heap.  This may be complicated because the size of
  // the heap depends on factors such as the throughput goal.
  // Still a large heap should be collected by more GC threads.
  active_workers_by_heap_size =
      MAX2((size_t) 2U, GC::gc()->heap()->capacity() / HeapSizePerGCThread);

  // Take the larger of the two heuristics...
  uintx max_active_workers =
    MAX2(active_workers_by_JT, active_workers_by_heap_size);

  // ...but never exceed the total number of available workers.
  new_active_workers = MIN2(max_active_workers, (uintx) total_workers);

  // Increase GC workers instantly but decrease them more
  // slowly.
  if (new_active_workers < prev_active_workers) {
    // Step halfway toward the smaller count instead of jumping there.
    new_active_workers =
      MAX2(min_workers, (prev_active_workers + new_active_workers) / 2);
  }

  // Check once more that the number of workers is within the limits.
  assert(min_workers <= total_workers, "Minimum workers not consistent with total workers");
  assert(new_active_workers >= min_workers, "Minimum workers not observed");
  assert(new_active_workers <= total_workers, "Total workers not observed");

  if (ForceDynamicNumberOfGCThreads) {
    // Assume this is debugging and jiggle the number of GC threads.
    if (new_active_workers == prev_active_workers) {
      if (new_active_workers < total_workers) {
        new_active_workers++;
      } else if (new_active_workers > min_workers) {
        new_active_workers--;
      }
    }
    if (new_active_workers == total_workers) {
      // _debug_perturbation alternates so that successive calls landing at
      // the maximum swing between total_workers and min_workers.
      if (_debug_perturbation) {
        new_active_workers =  min_workers;
      }
      _debug_perturbation = !_debug_perturbation;
    }
    // NOTE(review): the asserted upper bound is ParallelGCThreads rather
    // than total_workers — assumes callers never pass a larger total;
    // confirm against call sites.
    assert((new_active_workers <= ParallelGCThreads) &&
           (new_active_workers >= min_workers),
      "Jiggled active workers too much");
  }

  log_trace(gc, task)("GCTaskManager::calc_default_active_workers() : "
     "active_workers(): " UINTX_FORMAT "  new_active_workers: " UINTX_FORMAT "  "
     "prev_active_workers: " UINTX_FORMAT "\n"
     " active_workers_by_JT: " UINTX_FORMAT "  active_workers_by_heap_size: " UINTX_FORMAT,
     active_workers, new_active_workers, prev_active_workers,
     active_workers_by_JT, active_workers_by_heap_size);
  assert(new_active_workers > 0, "Always need at least 1");
  return new_active_workers;
}
 176 
 177 uint AdaptiveSizePolicy::calc_active_workers(uintx total_workers,
 178                                              uintx active_workers,
 179                                              uintx application_workers) {
 180   // If the user has specifically set the number of
 181   // GC threads, use them.
 182 
 183   // If the user has turned off using a dynamic number of GC threads
 184   // or the users has requested a specific number, set the active
 185   // number of workers to all the workers.
 186 
 187   uint new_active_workers;
 188   if (!UseDynamicNumberOfGCThreads ||
 189      (!FLAG_IS_DEFAULT(ParallelGCThreads) && !ForceDynamicNumberOfGCThreads)) {
 190     new_active_workers = total_workers;
 191   } else {
 192     uintx min_workers = (total_workers == 1) ? 1 : 2;
 193     new_active_workers = calc_default_active_workers(total_workers,
 194                                                      min_workers,
 195                                                      active_workers,
 196                                                      application_workers);
 197   }
 198   assert(new_active_workers > 0, "Always need at least 1");
 199   return new_active_workers;
 200 }
 201 
 202 uint AdaptiveSizePolicy::calc_active_conc_workers(uintx total_workers,
 203                                                   uintx active_workers,
 204                                                   uintx application_workers) {
 205   if (!UseDynamicNumberOfGCThreads ||
 206      (!FLAG_IS_DEFAULT(ConcGCThreads) && !ForceDynamicNumberOfGCThreads)) {
 207     return ConcGCThreads;
 208   } else {
 209     uint no_of_gc_threads = calc_default_active_workers(total_workers,
 210                                                         1, /* Minimum number of workers */
 211                                                         active_workers,
 212                                                         application_workers);
 213     return no_of_gc_threads;
 214   }
 215 }
 216 
 217 bool AdaptiveSizePolicy::tenuring_threshold_change() const {
 218   return decrement_tenuring_threshold_for_gc_cost() ||
 219          increment_tenuring_threshold_for_gc_cost() ||
 220          decrement_tenuring_threshold_for_survivor_limit();
 221 }
 222 
void AdaptiveSizePolicy::minor_collection_begin() {
  // Update the interval time
  _minor_timer.stop();
  // Save most recent collection time
  // The timer has been running since the end of the previous minor GC
  // (or since construction), so its elapsed time is the mutator interval.
  _latest_minor_mutator_interval_seconds = _minor_timer.seconds();
  // Restart the timer; from here on it measures this collection's pause.
  _minor_timer.reset();
  _minor_timer.start();
}
 231 
 232 void AdaptiveSizePolicy::update_minor_pause_young_estimator(
 233     double minor_pause_in_ms) {
 234   double eden_size_in_mbytes = ((double)_eden_size)/((double)M);
 235   _minor_pause_young_estimator->update(eden_size_in_mbytes,
 236     minor_pause_in_ms);
 237 }
 238 
void AdaptiveSizePolicy::minor_collection_end(GCCause::Cause gc_cause) {
  // Update the pause time.
  _minor_timer.stop();

  // User-requested (e.g. System.gc()) collections are excluded from the
  // statistics unless UseAdaptiveSizePolicyWithSystemGC is set.
  if (!GCCause::is_user_requested_gc(gc_cause) ||
      UseAdaptiveSizePolicyWithSystemGC) {
    double minor_pause_in_seconds = _minor_timer.seconds();
    double minor_pause_in_ms = minor_pause_in_seconds * MILLIUNITS;

    // Sample for performance counter
    _avg_minor_pause->sample(minor_pause_in_seconds);

    // Cost of collection (unit-less)
    // cost = pause / (mutator interval + pause), i.e. the fraction of
    // wall-clock time spent in this minor collection.
    double collection_cost = 0.0;
    if ((_latest_minor_mutator_interval_seconds > 0.0) &&
        (minor_pause_in_seconds > 0.0)) {
      double interval_in_seconds =
        _latest_minor_mutator_interval_seconds + minor_pause_in_seconds;
      collection_cost =
        minor_pause_in_seconds / interval_in_seconds;
      _avg_minor_gc_cost->sample(collection_cost);
      // Sample for performance counter
      _avg_minor_interval->sample(interval_in_seconds);
    }

    // The policy does not have enough data until at least some
    // young collections have been done.
    _young_gen_policy_is_ready =
      (_avg_minor_gc_cost->count() >= AdaptiveSizePolicyReadyThreshold);

    // Calculate variables used to estimate pause time vs. gen sizes
    double eden_size_in_mbytes = ((double)_eden_size) / ((double)M);
    update_minor_pause_young_estimator(minor_pause_in_ms);
    update_minor_pause_old_estimator(minor_pause_in_ms);

    log_trace(gc, ergo)("AdaptiveSizePolicy::minor_collection_end: minor gc cost: %f  average: %f",
                        collection_cost, _avg_minor_gc_cost->average());
    log_trace(gc, ergo)("  minor pause: %f minor period %f",
                        minor_pause_in_ms, _latest_minor_mutator_interval_seconds * MILLIUNITS);

    // Calculate variable used to estimate collection cost vs. gen sizes
    assert(collection_cost >= 0.0, "Expected to be non-negative");
    _minor_collection_estimator->update(eden_size_in_mbytes, collection_cost);
  }

  // Interval times use this timer to measure the mutator time.
  // Reset the timer after the GC pause.
  _minor_timer.reset();
  _minor_timer.start();
}
 289 
 290 size_t AdaptiveSizePolicy::eden_increment(size_t cur_eden, uint percent_change) {
 291   size_t eden_heap_delta;
 292   eden_heap_delta = cur_eden / 100 * percent_change;
 293   return eden_heap_delta;
 294 }
 295 
 296 size_t AdaptiveSizePolicy::eden_increment(size_t cur_eden) {
 297   return eden_increment(cur_eden, YoungGenerationSizeIncrement);
 298 }
 299 
 300 size_t AdaptiveSizePolicy::eden_decrement(size_t cur_eden) {
 301   size_t eden_heap_delta = eden_increment(cur_eden) /
 302     AdaptiveSizeDecrementScaleFactor;
 303   return eden_heap_delta;
 304 }
 305 
 306 size_t AdaptiveSizePolicy::promo_increment(size_t cur_promo, uint percent_change) {
 307   size_t promo_heap_delta;
 308   promo_heap_delta = cur_promo / 100 * percent_change;
 309   return promo_heap_delta;
 310 }
 311 
 312 size_t AdaptiveSizePolicy::promo_increment(size_t cur_promo) {
 313   return promo_increment(cur_promo, TenuredGenerationSizeIncrement);
 314 }
 315 
 316 size_t AdaptiveSizePolicy::promo_decrement(size_t cur_promo) {
 317   size_t promo_heap_delta = promo_increment(cur_promo);
 318   promo_heap_delta = promo_heap_delta / AdaptiveSizeDecrementScaleFactor;
 319   return promo_heap_delta;
 320 }
 321 
 322 double AdaptiveSizePolicy::time_since_major_gc() const {
 323   _major_timer.stop();
 324   double result = _major_timer.seconds();
 325   _major_timer.start();
 326   return result;
 327 }
 328 
 329 // Linear decay of major gc cost
 330 double AdaptiveSizePolicy::decaying_major_gc_cost() const {
 331   double major_interval = major_gc_interval_average_for_decay();
 332   double major_gc_cost_average = major_gc_cost();
 333   double decayed_major_gc_cost = major_gc_cost_average;
 334   if(time_since_major_gc() > 0.0) {
 335     decayed_major_gc_cost = major_gc_cost() *
 336       (((double) AdaptiveSizeMajorGCDecayTimeScale) * major_interval)
 337       / time_since_major_gc();
 338   }
 339 
 340   // The decayed cost should always be smaller than the
 341   // average cost but the vagaries of finite arithmetic could
 342   // produce a larger value in decayed_major_gc_cost so protect
 343   // against that.
 344   return MIN2(major_gc_cost_average, decayed_major_gc_cost);
 345 }
 346 
 347 // Use a value of the major gc cost that has been decayed
 348 // by the factor
 349 //
 350 //      average-interval-between-major-gc * AdaptiveSizeMajorGCDecayTimeScale /
 351 //        time-since-last-major-gc
 352 //
 353 // if the average-interval-between-major-gc * AdaptiveSizeMajorGCDecayTimeScale
 354 // is less than time-since-last-major-gc.
 355 //
 356 // In cases where there are initial major gc's that
 357 // are of a relatively high cost but no later major
 358 // gc's, the total gc cost can remain high because
 359 // the major gc cost remains unchanged (since there are no major
 360 // gc's).  In such a situation the value of the unchanging
 361 // major gc cost can keep the mutator throughput below
 362 // the goal when in fact the major gc cost is becoming diminishingly
 363 // small.  Use the decaying gc cost only to decide whether to
 364 // adjust for throughput.  Using it also to determine the adjustment
 365 // to be made for throughput also seems reasonable but there is
 366 // no test case to use to decide if it is the right thing to do
 367 // don't do it yet.
 368 
double AdaptiveSizePolicy::decaying_gc_cost() const {
  double decayed_major_gc_cost = major_gc_cost();
  double avg_major_interval = major_gc_interval_average_for_decay();
  // Decay applies only when enabled, the decay time scale is positive,
  // and at least one major-GC interval has been recorded.
  if (UseAdaptiveSizeDecayMajorGCCost &&
      (AdaptiveSizeMajorGCDecayTimeScale > 0) &&
      (avg_major_interval > 0.00)) {
    double time_since_last_major_gc = time_since_major_gc();

    // Decay the major gc cost?
    // Only once more than AdaptiveSizeMajorGCDecayTimeScale average
    // intervals have passed without a major collection (see the block
    // comment preceding this function).
    if (time_since_last_major_gc >
        ((double) AdaptiveSizeMajorGCDecayTimeScale) * avg_major_interval) {

      // Decay using the time-since-last-major-gc
      decayed_major_gc_cost = decaying_major_gc_cost();
      log_trace(gc, ergo)("decaying_gc_cost: major interval average: %f  time since last major gc: %f",
                    avg_major_interval, time_since_last_major_gc);
      log_trace(gc, ergo)("  major gc cost: %f  decayed major gc cost: %f",
                    major_gc_cost(), decayed_major_gc_cost);
    }
  }
  // Total GC cost (major + minor) is capped at 1.0 (100% of time in GC).
  double result = MIN2(1.0, decayed_major_gc_cost + minor_gc_cost());
  return result;
}
 392 
 393 
 394 void AdaptiveSizePolicy::clear_generation_free_space_flags() {
 395   set_change_young_gen_for_min_pauses(0);
 396   set_change_old_gen_for_maj_pauses(0);
 397 
 398   set_change_old_gen_for_throughput(0);
 399   set_change_young_gen_for_throughput(0);
 400   set_decrease_for_footprint(0);
 401   set_decide_at_full_gc(0);
 402 }
 403 
 404 void AdaptiveSizePolicy::check_gc_overhead_limit(
 405                                           size_t young_live,
 406                                           size_t eden_live,
 407                                           size_t max_old_gen_size,
 408                                           size_t max_eden_size,
 409                                           bool   is_full_gc,
 410                                           GCCause::Cause gc_cause,
 411                                           CollectorPolicy* collector_policy) {
 412 
 413   // Ignore explicit GC's.  Exiting here does not set the flag and
 414   // does not reset the count.  Updating of the averages for system
 415   // GC's is still controlled by UseAdaptiveSizePolicyWithSystemGC.
 416   if (GCCause::is_user_requested_gc(gc_cause) ||
 417       GCCause::is_serviceability_requested_gc(gc_cause)) {
 418     return;
 419   }
 420   // eden_limit is the upper limit on the size of eden based on
 421   // the maximum size of the young generation and the sizes
 422   // of the survivor space.
 423   // The question being asked is whether the gc costs are high
 424   // and the space being recovered by a collection is low.
 425   // free_in_young_gen is the free space in the young generation
 426   // after a collection and promo_live is the free space in the old
 427   // generation after a collection.
 428   //
 429   // Use the minimum of the current value of the live in the
 430   // young gen or the average of the live in the young gen.
 431   // If the current value drops quickly, that should be taken
 432   // into account (i.e., don't trigger if the amount of free
 433   // space has suddenly jumped up).  If the current is much
 434   // higher than the average, use the average since it represents
 435   // the longer term behavior.
 436   const size_t live_in_eden =
 437     MIN2(eden_live, (size_t) avg_eden_live()->average());
 438   const size_t free_in_eden = max_eden_size > live_in_eden ?
 439     max_eden_size - live_in_eden : 0;
 440   const size_t free_in_old_gen = (size_t)(max_old_gen_size - avg_old_live()->average());
 441   const size_t total_free_limit = free_in_old_gen + free_in_eden;
 442   const size_t total_mem = max_old_gen_size + max_eden_size;
 443   const double mem_free_limit = total_mem * (GCHeapFreeLimit/100.0);
 444   const double mem_free_old_limit = max_old_gen_size * (GCHeapFreeLimit/100.0);
 445   const double mem_free_eden_limit = max_eden_size * (GCHeapFreeLimit/100.0);
 446   const double gc_cost_limit = GCTimeLimit/100.0;
 447   size_t promo_limit = (size_t)(max_old_gen_size - avg_old_live()->average());
 448   // But don't force a promo size below the current promo size. Otherwise,
 449   // the promo size will shrink for no good reason.
 450   promo_limit = MAX2(promo_limit, _promo_size);
 451 
 452 
 453   log_trace(gc, ergo)(
 454         "PSAdaptiveSizePolicy::check_gc_overhead_limit:"
 455         " promo_limit: " SIZE_FORMAT
 456         " max_eden_size: " SIZE_FORMAT
 457         " total_free_limit: " SIZE_FORMAT
 458         " max_old_gen_size: " SIZE_FORMAT
 459         " max_eden_size: " SIZE_FORMAT
 460         " mem_free_limit: " SIZE_FORMAT,
 461         promo_limit, max_eden_size, total_free_limit,
 462         max_old_gen_size, max_eden_size,
 463         (size_t) mem_free_limit);
 464 
 465   bool print_gc_overhead_limit_would_be_exceeded = false;
 466   if (is_full_gc) {
 467     if (gc_cost() > gc_cost_limit &&
 468       free_in_old_gen < (size_t) mem_free_old_limit &&
 469       free_in_eden < (size_t) mem_free_eden_limit) {
 470       // Collections, on average, are taking too much time, and
 471       //      gc_cost() > gc_cost_limit
 472       // we have too little space available after a full gc.
 473       //      total_free_limit < mem_free_limit
 474       // where
 475       //   total_free_limit is the free space available in
 476       //     both generations
 477       //   total_mem is the total space available for allocation
 478       //     in both generations (survivor spaces are not included
 479       //     just as they are not included in eden_limit).
 480       //   mem_free_limit is a fraction of total_mem judged to be an
 481       //     acceptable amount that is still unused.
 482       // The heap can ask for the value of this variable when deciding
 483       // whether to thrown an OutOfMemory error.
 484       // Note that the gc time limit test only works for the collections
 485       // of the young gen + tenured gen and not for collections of the
 486       // permanent gen.  That is because the calculation of the space
 487       // freed by the collection is the free space in the young gen +
 488       // tenured gen.
 489       // At this point the GC overhead limit is being exceeded.
 490       inc_gc_overhead_limit_count();
 491       if (UseGCOverheadLimit) {
 492         if (gc_overhead_limit_count() >=
 493             AdaptiveSizePolicyGCTimeLimitThreshold){
 494           // All conditions have been met for throwing an out-of-memory
 495           set_gc_overhead_limit_exceeded(true);
 496           // Avoid consecutive OOM due to the gc time limit by resetting
 497           // the counter.
 498           reset_gc_overhead_limit_count();
 499         } else {
 500           // The required consecutive collections which exceed the
 501           // GC time limit may or may not have been reached. We
 502           // are approaching that condition and so as not to
 503           // throw an out-of-memory before all SoftRef's have been
 504           // cleared, set _should_clear_all_soft_refs in CollectorPolicy.
 505           // The clearing will be done on the next GC.
 506           bool near_limit = gc_overhead_limit_near();
 507           if (near_limit) {
 508             collector_policy->set_should_clear_all_soft_refs(true);
 509             log_trace(gc, ergo)("Nearing GC overhead limit, will be clearing all SoftReference");
 510           }
 511         }
 512       }
 513       // Set this even when the overhead limit will not
 514       // cause an out-of-memory.  Diagnostic message indicating
 515       // that the overhead limit is being exceeded is sometimes
 516       // printed.
 517       print_gc_overhead_limit_would_be_exceeded = true;
 518 
 519     } else {
 520       // Did not exceed overhead limits
 521       reset_gc_overhead_limit_count();
 522     }
 523   }
 524 
 525   if (UseGCOverheadLimit) {
 526     if (gc_overhead_limit_exceeded()) {
 527       log_trace(gc, ergo)("GC is exceeding overhead limit of " UINTX_FORMAT "%%", GCTimeLimit);
 528       reset_gc_overhead_limit_count();
 529     } else if (print_gc_overhead_limit_would_be_exceeded) {
 530       assert(gc_overhead_limit_count() > 0, "Should not be printing");
 531       log_trace(gc, ergo)("GC would exceed overhead limit of " UINTX_FORMAT "%% %d consecutive time(s)",
 532                           GCTimeLimit, gc_overhead_limit_count());
 533     }
 534   }
 535 }
 536 // Printing
 537 
 538 bool AdaptiveSizePolicy::print() const {
 539   assert(UseAdaptiveSizePolicy, "UseAdaptiveSizePolicy need to be enabled.");
 540 
 541   if (!log_is_enabled(Debug, gc, ergo)) {
 542     return false;
 543   }
 544 
 545   // Print goal for which action is needed.
 546   char* action = NULL;
 547   bool change_for_pause = false;
 548   if ((change_old_gen_for_maj_pauses() ==
 549          decrease_old_gen_for_maj_pauses_true) ||
 550       (change_young_gen_for_min_pauses() ==
 551          decrease_young_gen_for_min_pauses_true)) {
 552     action = (char*) " *** pause time goal ***";
 553     change_for_pause = true;
 554   } else if ((change_old_gen_for_throughput() ==
 555                increase_old_gen_for_throughput_true) ||
 556             (change_young_gen_for_throughput() ==
 557                increase_young_gen_for_througput_true)) {
 558     action = (char*) " *** throughput goal ***";
 559   } else if (decrease_for_footprint()) {
 560     action = (char*) " *** reduced footprint ***";
 561   } else {
 562     // No actions were taken.  This can legitimately be the
 563     // situation if not enough data has been gathered to make
 564     // decisions.
 565     return false;
 566   }
 567 
 568   // Pauses
 569   // Currently the size of the old gen is only adjusted to
 570   // change the major pause times.
 571   char* young_gen_action = NULL;
 572   char* tenured_gen_action = NULL;
 573 
 574   char* shrink_msg = (char*) "(attempted to shrink)";
 575   char* grow_msg = (char*) "(attempted to grow)";
 576   char* no_change_msg = (char*) "(no change)";
 577   if (change_young_gen_for_min_pauses() ==
 578       decrease_young_gen_for_min_pauses_true) {
 579     young_gen_action = shrink_msg;
 580   } else if (change_for_pause) {
 581     young_gen_action = no_change_msg;
 582   }
 583 
 584   if (change_old_gen_for_maj_pauses() == decrease_old_gen_for_maj_pauses_true) {
 585     tenured_gen_action = shrink_msg;
 586   } else if (change_for_pause) {
 587     tenured_gen_action = no_change_msg;
 588   }
 589 
 590   // Throughput
 591   if (change_old_gen_for_throughput() == increase_old_gen_for_throughput_true) {
 592     assert(change_young_gen_for_throughput() ==
 593            increase_young_gen_for_througput_true,
 594            "Both generations should be growing");
 595     young_gen_action = grow_msg;
 596     tenured_gen_action = grow_msg;
 597   } else if (change_young_gen_for_throughput() ==
 598              increase_young_gen_for_througput_true) {
 599     // Only the young generation may grow at start up (before
 600     // enough full collections have been done to grow the old generation).
 601     young_gen_action = grow_msg;
 602     tenured_gen_action = no_change_msg;
 603   }
 604 
 605   // Minimum footprint
 606   if (decrease_for_footprint() != 0) {
 607     young_gen_action = shrink_msg;
 608     tenured_gen_action = shrink_msg;
 609   }
 610 
 611   log_debug(gc, ergo)("UseAdaptiveSizePolicy actions to meet %s", action);
 612   log_debug(gc, ergo)("                       GC overhead (%%)");
 613   log_debug(gc, ergo)("    Young generation:     %7.2f\t  %s",
 614                       100.0 * avg_minor_gc_cost()->average(), young_gen_action);
 615   log_debug(gc, ergo)("    Tenured generation:   %7.2f\t  %s",
 616                       100.0 * avg_major_gc_cost()->average(), tenured_gen_action);
 617   return true;
 618 }
 619 
 620 void AdaptiveSizePolicy::print_tenuring_threshold( uint new_tenuring_threshold_arg) const {
 621   // Tenuring threshold
 622   if (decrement_tenuring_threshold_for_survivor_limit()) {
 623     log_debug(gc, ergo)("Tenuring threshold: (attempted to decrease to avoid survivor space overflow) = %u", new_tenuring_threshold_arg);
 624   } else if (decrement_tenuring_threshold_for_gc_cost()) {
 625     log_debug(gc, ergo)("Tenuring threshold: (attempted to decrease to balance GC costs) = %u", new_tenuring_threshold_arg);
 626   } else if (increment_tenuring_threshold_for_gc_cost()) {
 627     log_debug(gc, ergo)("Tenuring threshold: (attempted to increase to balance GC costs) = %u", new_tenuring_threshold_arg);
 628   } else {
 629     assert(!tenuring_threshold_change(), "(no change was attempted)");
 630   }
 631 }
 632 
 633 bool AdaptiveSizePolicyOutput::enabled() {
 634   return UseParallelGC &&
 635     UseAdaptiveSizePolicy &&
 636     log_is_enabled(Debug, gc, ergo);
 637 }
 638 
 639 void AdaptiveSizePolicyOutput::print() {
 640   if (enabled()) {
 641     GC::gc()->heap()->size_policy()->print();
 642   }
 643 }
 644 
 645 void AdaptiveSizePolicyOutput::print(AdaptiveSizePolicy* size_policy, uint count) {
 646   bool do_print =
 647     enabled() &&
 648     (AdaptiveSizePolicyOutputInterval > 0) &&
 649     (count % AdaptiveSizePolicyOutputInterval) == 0;
 650 
 651   if (do_print) {
 652     size_policy->print();
 653   }
 654 }