/*
 * Copyright (c) 2004, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/workgroup.hpp"
#include "runtime/timer.hpp"
#include "utilities/ostream.hpp"
elapsedTimer AdaptiveSizePolicy::_minor_timer;
elapsedTimer AdaptiveSizePolicy::_major_timer;
bool AdaptiveSizePolicy::_debug_perturbation = false;

// The throughput goal is implemented as
//      _throughput_goal = 1 - ( 1 / (1 + gc_cost_ratio))
// gc_cost_ratio is the ratio
//      application cost / gc cost
// For example a gc_cost_ratio of 4 translates into a
// throughput goal of .80
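//
// Further illustration (arithmetic only): a gc_cost_ratio of 9 gives a
// throughput goal of 1 - 1/(1 + 9) = 0.90, and a gc_cost_ratio of 99 (the
// kind of value typically supplied via GCTimeRatio) gives 1 - 1/100 = 0.99,
// i.e. at most about 1% of time spent in GC.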

AdaptiveSizePolicy::AdaptiveSizePolicy(size_t init_eden_size,
                                       size_t init_promo_size,
                                       size_t init_survivor_size,
                                       double gc_pause_goal_sec,
                                       uint gc_cost_ratio) :
    _eden_size(init_eden_size),
    _promo_size(init_promo_size),
    _survivor_size(init_survivor_size),
    _gc_pause_goal_sec(gc_pause_goal_sec),
    _throughput_goal(1.0 - double(1.0 / (1.0 + (double) gc_cost_ratio))),
    _gc_overhead_limit_exceeded(false),
    _print_gc_overhead_limit_would_be_exceeded(false),
    _gc_overhead_limit_count(0),
    _latest_minor_mutator_interval_seconds(0),
    _threshold_tolerance_percent(1.0 + ThresholdTolerance/100.0),
    _young_gen_change_for_minor_throughput(0),
    _old_gen_change_for_major_throughput(0) {
  assert(AdaptiveSizePolicyGCTimeLimitThreshold > 0,
    "No opportunity to clear SoftReferences before GC overhead limit");
  _avg_minor_pause    =
    new AdaptivePaddedAverage(AdaptiveTimeWeight, PausePadding);
  _avg_minor_interval = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
  _avg_minor_gc_cost  = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
  _avg_major_gc_cost  = new AdaptiveWeightedAverage(AdaptiveTimeWeight);

  _avg_young_live     = new AdaptiveWeightedAverage(AdaptiveSizePolicyWeight);
  _avg_old_live       = new AdaptiveWeightedAverage(AdaptiveSizePolicyWeight);
  _avg_eden_live      = new AdaptiveWeightedAverage(AdaptiveSizePolicyWeight);

  _avg_survived       = new AdaptivePaddedAverage(AdaptiveSizePolicyWeight,
                                                  SurvivorPadding);
  _avg_pretenured     = new AdaptivePaddedNoZeroDevAverage(
                                                  AdaptiveSizePolicyWeight,
                                                  SurvivorPadding);

  _minor_pause_old_estimator =
    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
  _minor_pause_young_estimator =
    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
  _minor_collection_estimator =
    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
  _major_collection_estimator =
    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);

  // Start the timers
  _minor_timer.start();

  _young_gen_policy_is_ready = false;
}

//  If the number of GC threads was set on the command line,
// use it.
//  Else
//    Calculate the number of GC threads based on the number of Java threads.
//    Calculate the number of GC threads based on the size of the heap.
//    Use the larger.

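// Worked example of the calculation below (illustrative values only, not
// defaults guaranteed by this file): with total_workers = 8, min_workers = 2,
// application_workers = 3, GCWorkersPerJavaThread = 2, a 1 GB heap and
// HeapSizePerGCThread = 64 MB:
//   active_workers_by_JT        = MAX2(2 * 3, 2)          = 6
//   active_workers_by_heap_size = MAX2(2, 1024M / 64M)    = 16
//   max_active_workers          = MAX2(6, 16)             = 16
//   new_active_workers          = MIN2(16, total_workers) = 8
// If the previous number of active workers was larger, the result is averaged
// with it so the worker count decays gradually rather than dropping at once.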
uint AdaptiveSizePolicy::calc_default_active_workers(uintx total_workers,
                                                     const uintx min_workers,
                                                     uintx active_workers,
                                                     uintx application_workers) {
  // If the user has specifically set the number of
  // GC threads, use them.

  // If the user has turned off using a dynamic number of GC threads
  // or the user has requested a specific number, set the active
  // number of workers to all the workers.

  uintx new_active_workers = total_workers;
  uintx prev_active_workers = active_workers;
  uintx active_workers_by_JT = 0;
  uintx active_workers_by_heap_size = 0;

  // Always use at least min_workers but use up to
  // GCWorkersPerJavaThread * application threads.
  active_workers_by_JT =
    MAX2((uintx) GCWorkersPerJavaThread * application_workers,
         min_workers);

  // Choose a number of GC threads based on the current size
  // of the heap.  This may be complicated because the size of
  // the heap depends on factors such as the throughput goal.
  // Still, a large heap should be collected by more GC threads.
  active_workers_by_heap_size =
      MAX2((size_t) 2U, Universe::heap()->capacity() / HeapSizePerGCThread);

  uintx max_active_workers =
    MAX2(active_workers_by_JT, active_workers_by_heap_size);

  // Limit the number of workers to the number created,
  // (workers()).
  new_active_workers = MIN2(max_active_workers,
                            (uintx) total_workers);

  // Increase GC workers instantly but decrease them more
  // slowly.
  if (new_active_workers < prev_active_workers) {
    new_active_workers =
      MAX2(min_workers, (prev_active_workers + new_active_workers) / 2);
  }

  // Check once more that the number of workers is within the limits.
  assert(min_workers <= total_workers, "Minimum workers not consistent with total workers");
  assert(new_active_workers >= min_workers, "Minimum workers not observed");
  assert(new_active_workers <= total_workers, "Total workers not observed");

  if (ForceDynamicNumberOfGCThreads) {
    // Assume this is debugging and jiggle the number of GC threads.
    if (new_active_workers == prev_active_workers) {
      if (new_active_workers < total_workers) {
        new_active_workers++;
      } else if (new_active_workers > min_workers) {
        new_active_workers--;
      }
    }
    if (new_active_workers == total_workers) {
      if (_debug_perturbation) {
        new_active_workers = min_workers;
      }
      _debug_perturbation = !_debug_perturbation;
    }
    assert((new_active_workers <= ParallelGCThreads) &&
           (new_active_workers >= min_workers),
      "Jiggled active workers too much");
  }

  if (TraceDynamicGCThreads) {
     gclog_or_tty->print_cr("AdaptiveSizePolicy::calc_default_active_workers() : "
       "active_workers(): " UINTX_FORMAT "  new_active_workers: " UINTX_FORMAT "  "
       "prev_active_workers: " UINTX_FORMAT "\n"
       " active_workers_by_JT: " UINTX_FORMAT "  active_workers_by_heap_size: " UINTX_FORMAT,
       active_workers, new_active_workers, prev_active_workers,
       active_workers_by_JT, active_workers_by_heap_size);
  }
  assert(new_active_workers > 0, "Always need at least 1");
  return new_active_workers;
}

uint AdaptiveSizePolicy::calc_active_workers(uintx total_workers,
                                             uintx active_workers,
                                             uintx application_workers) {
  // If the user has specifically set the number of
  // GC threads, use them.

  // If the user has turned off using a dynamic number of GC threads
  // or the user has requested a specific number, set the active
  // number of workers to all the workers.

  uint new_active_workers;
  if (!UseDynamicNumberOfGCThreads ||
     (!FLAG_IS_DEFAULT(ParallelGCThreads) && !ForceDynamicNumberOfGCThreads)) {
    new_active_workers = total_workers;
  } else {
    uintx min_workers = (total_workers == 1) ? 1 : 2;
    new_active_workers = calc_default_active_workers(total_workers,
                                                     min_workers,
                                                     active_workers,
                                                     application_workers);
  }
  assert(new_active_workers > 0, "Always need at least 1");
  return new_active_workers;
}

uint AdaptiveSizePolicy::calc_active_conc_workers(uintx total_workers,
                                                  uintx active_workers,
                                                  uintx application_workers) {
  if (!UseDynamicNumberOfGCThreads ||
     (!FLAG_IS_DEFAULT(ConcGCThreads) && !ForceDynamicNumberOfGCThreads)) {
    return ConcGCThreads;
  } else {
    uint no_of_gc_threads = calc_default_active_workers(total_workers,
                                                        1, /* Minimum number of workers */
                                                        active_workers,
                                                        application_workers);
    return no_of_gc_threads;
  }
}

bool AdaptiveSizePolicy::tenuring_threshold_change() const {
  return decrement_tenuring_threshold_for_gc_cost() ||
         increment_tenuring_threshold_for_gc_cost() ||
         decrement_tenuring_threshold_for_survivor_limit();
}

void AdaptiveSizePolicy::minor_collection_begin() {
  // Update the interval time
  _minor_timer.stop();
  // Save the most recent mutator interval (time since the last minor GC ended)
  _latest_minor_mutator_interval_seconds = _minor_timer.seconds();
  _minor_timer.reset();
  _minor_timer.start();
}

void AdaptiveSizePolicy::update_minor_pause_young_estimator(
    double minor_pause_in_ms) {
  double eden_size_in_mbytes = ((double)_eden_size)/((double)M);
  _minor_pause_young_estimator->update(eden_size_in_mbytes,
    minor_pause_in_ms);
}

void AdaptiveSizePolicy::minor_collection_end(GCCause::Cause gc_cause) {
  // Update the pause time.
  _minor_timer.stop();

  if (!GCCause::is_user_requested_gc(gc_cause) ||
      UseAdaptiveSizePolicyWithSystemGC) {
    double minor_pause_in_seconds = _minor_timer.seconds();
    double minor_pause_in_ms = minor_pause_in_seconds * MILLIUNITS;

    // Sample for performance counter
    _avg_minor_pause->sample(minor_pause_in_seconds);

    // Cost of collection (unit-less)
    double collection_cost = 0.0;
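    // Illustrative numbers (not taken from this file): a 50 ms minor pause
    // following 950 ms of mutator time gives
    //   collection_cost = 0.050 / (0.950 + 0.050) = 0.05
    // i.e. 5% of recent wall-clock time was spent in this minor collection.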
    if ((_latest_minor_mutator_interval_seconds > 0.0) &&
        (minor_pause_in_seconds > 0.0)) {
      double interval_in_seconds =
        _latest_minor_mutator_interval_seconds + minor_pause_in_seconds;
      collection_cost =
        minor_pause_in_seconds / interval_in_seconds;
      _avg_minor_gc_cost->sample(collection_cost);
      // Sample for performance counter
      _avg_minor_interval->sample(interval_in_seconds);
    }

    // The policy does not have enough data until at least some
    // young collections have been done.
    _young_gen_policy_is_ready =
      (_avg_minor_gc_cost->count() >= AdaptiveSizePolicyReadyThreshold);

    // Calculate variables used to estimate pause time vs. gen sizes
    double eden_size_in_mbytes = ((double)_eden_size) / ((double)M);
    update_minor_pause_young_estimator(minor_pause_in_ms);
    update_minor_pause_old_estimator(minor_pause_in_ms);

    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print("AdaptiveSizePolicy::minor_collection_end: "
                          "minor gc cost: %f  average: %f", collection_cost,
                          _avg_minor_gc_cost->average());
      gclog_or_tty->print_cr("  minor pause: %f minor period %f",
                             minor_pause_in_ms,
                             _latest_minor_mutator_interval_seconds * MILLIUNITS);
    }

    // Calculate variable used to estimate collection cost vs. gen sizes
    assert(collection_cost >= 0.0, "Expected to be non-negative");
    _minor_collection_estimator->update(eden_size_in_mbytes, collection_cost);
  }

  // Interval times use this timer to measure the mutator time.
  // Reset the timer after the GC pause.
  _minor_timer.reset();
  _minor_timer.start();
}

size_t AdaptiveSizePolicy::eden_increment(size_t cur_eden, uint percent_change) {
  size_t eden_heap_delta;
  eden_heap_delta = cur_eden / 100 * percent_change;
  return eden_heap_delta;
}

size_t AdaptiveSizePolicy::eden_increment(size_t cur_eden) {
  return eden_increment(cur_eden, YoungGenerationSizeIncrement);
}

size_t AdaptiveSizePolicy::eden_decrement(size_t cur_eden) {
  size_t eden_heap_delta = eden_increment(cur_eden) /
    AdaptiveSizeDecrementScaleFactor;
  return eden_heap_delta;
}
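
// Illustrative arithmetic (assumed example values, not defaults asserted
// here): with cur_eden = 500 MB, YoungGenerationSizeIncrement = 20 and
// AdaptiveSizeDecrementScaleFactor = 4, eden_increment() returns
// 500M / 100 * 20 = 100 MB while eden_decrement() returns 100 MB / 4 = 25 MB,
// so eden grows in larger steps than it shrinks.  Note that the division by
// 100 is done first, which avoids overflow for very large eden sizes at the
// price of rounding down.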

size_t AdaptiveSizePolicy::promo_increment(size_t cur_promo, uint percent_change) {
  size_t promo_heap_delta;
  promo_heap_delta = cur_promo / 100 * percent_change;
  return promo_heap_delta;
}

size_t AdaptiveSizePolicy::promo_increment(size_t cur_promo) {
  return promo_increment(cur_promo, TenuredGenerationSizeIncrement);
}

size_t AdaptiveSizePolicy::promo_decrement(size_t cur_promo) {
  size_t promo_heap_delta = promo_increment(cur_promo);
  promo_heap_delta = promo_heap_delta / AdaptiveSizeDecrementScaleFactor;
  return promo_heap_delta;
}

double AdaptiveSizePolicy::time_since_major_gc() const {
  _major_timer.stop();
  double result = _major_timer.seconds();
  _major_timer.start();
  return result;
}

// Linear decay of major gc cost
double AdaptiveSizePolicy::decaying_major_gc_cost() const {
  double major_interval = major_gc_interval_average_for_decay();
  double major_gc_cost_average = major_gc_cost();
  double decayed_major_gc_cost = major_gc_cost_average;
  if (time_since_major_gc() > 0.0) {
    decayed_major_gc_cost = major_gc_cost() *
      (((double) AdaptiveSizeMajorGCDecayTimeScale) * major_interval)
      / time_since_major_gc();
  }

  // The decayed cost should always be smaller than the
  // average cost but the vagaries of finite arithmetic could
  // produce a larger value in decayed_major_gc_cost so protect
  // against that.
  return MIN2(major_gc_cost_average, decayed_major_gc_cost);
}

// Use a value of the major gc cost that has been decayed
// by the factor
//
//      average-interval-between-major-gc * AdaptiveSizeMajorGCDecayTimeScale /
//        time-since-last-major-gc
//
// if the average-interval-between-major-gc * AdaptiveSizeMajorGCDecayTimeScale
// is less than time-since-last-major-gc.
//
// In cases where there are initial major gc's that
// are of a relatively high cost but no later major
// gc's, the total gc cost can remain high because
// the major gc cost remains unchanged (since there are no major
// gc's).  In such a situation the value of the unchanging
// major gc cost can keep the mutator throughput below
// the goal when in fact the major gc cost is becoming diminishingly
// small.  Use the decaying gc cost only to decide whether to
// adjust for throughput.  Using it also to determine the adjustment
// to be made for throughput seems reasonable as well, but there is
// no test case to use to decide whether that is the right thing to do,
// so don't do it yet.

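// Worked example of the decay (illustrative values only): with
// AdaptiveSizeMajorGCDecayTimeScale = 10, an average major gc interval of
// 5 seconds and 100 seconds since the last major gc, decay kicks in because
// 100 > 10 * 5, and the decayed cost is
//      major_gc_cost() * (10 * 5) / 100 = major_gc_cost() * 0.5
// so the contribution of old, expensive major collections fades as
// major-gc-free time accumulates.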
double AdaptiveSizePolicy::decaying_gc_cost() const {
  double decayed_major_gc_cost = major_gc_cost();
  double avg_major_interval = major_gc_interval_average_for_decay();
  if (UseAdaptiveSizeDecayMajorGCCost &&
      (AdaptiveSizeMajorGCDecayTimeScale > 0) &&
      (avg_major_interval > 0.00)) {
    double time_since_last_major_gc = time_since_major_gc();

    // Decay the major gc cost?
    if (time_since_last_major_gc >
        ((double) AdaptiveSizeMajorGCDecayTimeScale) * avg_major_interval) {

      // Decay using the time-since-last-major-gc
      decayed_major_gc_cost = decaying_major_gc_cost();
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("\ndecaying_gc_cost: major interval average:"
          " %f  time since last major gc: %f",
          avg_major_interval, time_since_last_major_gc);
        gclog_or_tty->print_cr("  major gc cost: %f  decayed major gc cost: %f",
          major_gc_cost(), decayed_major_gc_cost);
      }
    }
  }
  double result = MIN2(1.0, decayed_major_gc_cost + minor_gc_cost());
  return result;
}


void AdaptiveSizePolicy::clear_generation_free_space_flags() {
  set_change_young_gen_for_min_pauses(0);
  set_change_old_gen_for_maj_pauses(0);

  set_change_old_gen_for_throughput(0);
  set_change_young_gen_for_throughput(0);
  set_decrease_for_footprint(0);
  set_decide_at_full_gc(0);
}

void AdaptiveSizePolicy::check_gc_overhead_limit(
                                          size_t young_live,
                                          size_t eden_live,
                                          size_t max_old_gen_size,
                                          size_t max_eden_size,
                                          bool   is_full_gc,
                                          GCCause::Cause gc_cause,
                                          CollectorPolicy* collector_policy) {

  // Ignore explicit GC's.  Exiting here does not set the flag and
  // does not reset the count.  Updating of the averages for system
  // GC's is still controlled by UseAdaptiveSizePolicyWithSystemGC.
  if (GCCause::is_user_requested_gc(gc_cause) ||
      GCCause::is_serviceability_requested_gc(gc_cause)) {
    return;
  }
  // max_eden_size is the upper limit on the size of eden based on
  // the maximum size of the young generation and the sizes
  // of the survivor space.
  // The question being asked is whether the gc costs are high
  // and the space being recovered by a collection is low.
  // free_in_eden is the free space in eden after a collection
  // and free_in_old_gen is the free space in the old generation
  // after a collection.
  //
  // Use the minimum of the current amount of live data in the
  // young gen or the average of the live data in the young gen.
  // If the current value drops quickly, that should be taken
  // into account (i.e., don't trigger if the amount of free
  // space has suddenly jumped up).  If the current is much
  // higher than the average, use the average since it represents
  // the longer term behavior.
  const size_t live_in_eden =
    MIN2(eden_live, (size_t) avg_eden_live()->average());
  const size_t free_in_eden = max_eden_size > live_in_eden ?
    max_eden_size - live_in_eden : 0;
  const size_t free_in_old_gen = (size_t)(max_old_gen_size - avg_old_live()->average());
  const size_t total_free_limit = free_in_old_gen + free_in_eden;
  const size_t total_mem = max_old_gen_size + max_eden_size;
  const double mem_free_limit = total_mem * (GCHeapFreeLimit/100.0);
  const double mem_free_old_limit = max_old_gen_size * (GCHeapFreeLimit/100.0);
  const double mem_free_eden_limit = max_eden_size * (GCHeapFreeLimit/100.0);
  const double gc_cost_limit = GCTimeLimit/100.0;
  size_t promo_limit = (size_t)(max_old_gen_size - avg_old_live()->average());
  // But don't force a promo size below the current promo size. Otherwise,
  // the promo size will shrink for no good reason.
  promo_limit = MAX2(promo_limit, _promo_size);


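  // Illustrative numbers (example values, not defaults guaranteed here): with
  // GCHeapFreeLimit = 2, GCTimeLimit = 98, max_old_gen_size = 900 MB and
  // max_eden_size = 100 MB, the limits are mem_free_old_limit = 18 MB,
  // mem_free_eden_limit = 2 MB and gc_cost_limit = 0.98, so the overhead
  // check below only fires when GC consumes more than 98% of the time and
  // less than 2% of each generation is free after a full collection.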
  if (PrintAdaptiveSizePolicy && (Verbose ||
      (free_in_old_gen < (size_t) mem_free_old_limit &&
       free_in_eden < (size_t) mem_free_eden_limit))) {
    gclog_or_tty->print_cr(
          "AdaptiveSizePolicy::check_gc_overhead_limit:"
          " promo_limit: " SIZE_FORMAT
          " max_eden_size: " SIZE_FORMAT
          " total_free_limit: " SIZE_FORMAT
          " max_old_gen_size: " SIZE_FORMAT
          " max_eden_size: " SIZE_FORMAT
          " mem_free_limit: " SIZE_FORMAT,
          promo_limit, max_eden_size, total_free_limit,
          max_old_gen_size, max_eden_size,
          (size_t) mem_free_limit);
  }

  bool print_gc_overhead_limit_would_be_exceeded = false;
  if (is_full_gc) {
    if (gc_cost() > gc_cost_limit &&
      free_in_old_gen < (size_t) mem_free_old_limit &&
      free_in_eden < (size_t) mem_free_eden_limit) {
      // Collections, on average, are taking too much time, and
      //      gc_cost() > gc_cost_limit
      // we have too little space available after a full gc.
      //      total_free_limit < mem_free_limit
      // where
      //   total_free_limit is the free space available in
      //     both generations
      //   total_mem is the total space available for allocation
      //     in both generations (survivor spaces are not included
      //     just as they are not included in max_eden_size).
      //   mem_free_limit is a fraction of total_mem judged to be an
      //     acceptable amount that is still unused.
      // The heap can ask for the value of this variable when deciding
      // whether to throw an OutOfMemory error.
      // Note that the gc time limit test only works for the collections
      // of the young gen + tenured gen and not for collections of the
      // permanent gen.  That is because the calculation of the space
      // freed by the collection is the free space in the young gen +
      // tenured gen.
      // At this point the GC overhead limit is being exceeded.
      inc_gc_overhead_limit_count();
      if (UseGCOverheadLimit) {
        if (gc_overhead_limit_count() >=
            AdaptiveSizePolicyGCTimeLimitThreshold) {
          // All conditions have been met for throwing an out-of-memory
          set_gc_overhead_limit_exceeded(true);
          // Avoid consecutive OOM due to the gc time limit by resetting
          // the counter.
          reset_gc_overhead_limit_count();
        } else {
          // The required consecutive collections which exceed the
          // GC time limit may or may not have been reached. We
          // are approaching that condition and so as not to
          // throw an out-of-memory before all SoftRefs have been
          // cleared, set _should_clear_all_soft_refs in CollectorPolicy.
          // The clearing will be done on the next GC.
          bool near_limit = gc_overhead_limit_near();
          if (near_limit) {
            collector_policy->set_should_clear_all_soft_refs(true);
            if (PrintGCDetails && Verbose) {
              gclog_or_tty->print_cr("  Nearing GC overhead limit, "
                "will be clearing all SoftReferences");
            }
          }
        }
      }
      // Set this even when the overhead limit will not
      // cause an out-of-memory.  A diagnostic message indicating
      // that the overhead limit is being exceeded is sometimes
      // printed.
      print_gc_overhead_limit_would_be_exceeded = true;

    } else {
      // Did not exceed overhead limits
      reset_gc_overhead_limit_count();
    }
  }

  if (UseGCOverheadLimit && PrintGCDetails && Verbose) {
    if (gc_overhead_limit_exceeded()) {
      gclog_or_tty->print_cr("      GC is exceeding overhead limit "
        "of " UINTX_FORMAT "%%", GCTimeLimit);
      reset_gc_overhead_limit_count();
    } else if (print_gc_overhead_limit_would_be_exceeded) {
      assert(gc_overhead_limit_count() > 0, "Should not be printing");
      gclog_or_tty->print_cr("      GC would exceed overhead limit "
        "of " UINTX_FORMAT "%% %d consecutive time(s)",
        GCTimeLimit, gc_overhead_limit_count());
    }
  }
}
// Printing

bool AdaptiveSizePolicy::print_adaptive_size_policy_on(outputStream* st) const {

  //  Should only be used with adaptive size policy turned on.
  // Otherwise, there may be variables that are undefined.
  if (!UseAdaptiveSizePolicy) return false;

  // Print goal for which action is needed.
  char* action = NULL;
  bool change_for_pause = false;
  if ((change_old_gen_for_maj_pauses() ==
         decrease_old_gen_for_maj_pauses_true) ||
      (change_young_gen_for_min_pauses() ==
         decrease_young_gen_for_min_pauses_true)) {
    action = (char*) " *** pause time goal ***";
    change_for_pause = true;
  } else if ((change_old_gen_for_throughput() ==
               increase_old_gen_for_throughput_true) ||
            (change_young_gen_for_throughput() ==
               increase_young_gen_for_througput_true)) {
    action = (char*) " *** throughput goal ***";
  } else if (decrease_for_footprint()) {
    action = (char*) " *** reduced footprint ***";
  } else {
    // No actions were taken.  This can legitimately be the
    // situation if not enough data has been gathered to make
    // decisions.
    return false;
  }

  // Pauses
  // Currently the size of the old gen is only adjusted to
  // change the major pause times.
  char* young_gen_action = NULL;
  char* tenured_gen_action = NULL;

  char* shrink_msg = (char*) "(attempted to shrink)";
  char* grow_msg = (char*) "(attempted to grow)";
  char* no_change_msg = (char*) "(no change)";
  if (change_young_gen_for_min_pauses() ==
      decrease_young_gen_for_min_pauses_true) {
    young_gen_action = shrink_msg;
  } else if (change_for_pause) {
    young_gen_action = no_change_msg;
  }

  if (change_old_gen_for_maj_pauses() == decrease_old_gen_for_maj_pauses_true) {
    tenured_gen_action = shrink_msg;
  } else if (change_for_pause) {
    tenured_gen_action = no_change_msg;
  }

  // Throughput
  if (change_old_gen_for_throughput() == increase_old_gen_for_throughput_true) {
    assert(change_young_gen_for_throughput() ==
           increase_young_gen_for_througput_true,
           "Both generations should be growing");
    young_gen_action = grow_msg;
    tenured_gen_action = grow_msg;
  } else if (change_young_gen_for_throughput() ==
             increase_young_gen_for_througput_true) {
    // Only the young generation may grow at start up (before
    // enough full collections have been done to grow the old generation).
    young_gen_action = grow_msg;
    tenured_gen_action = no_change_msg;
  }

  // Minimum footprint
  if (decrease_for_footprint() != 0) {
    young_gen_action = shrink_msg;
    tenured_gen_action = shrink_msg;
  }

  st->print_cr("    UseAdaptiveSizePolicy actions to meet %s", action);
  st->print_cr("                       GC overhead (%%)");
  st->print_cr("    Young generation:     %7.2f\t  %s",
    100.0 * avg_minor_gc_cost()->average(),
    young_gen_action);
  st->print_cr("    Tenured generation:   %7.2f\t  %s",
    100.0 * avg_major_gc_cost()->average(),
    tenured_gen_action);
  return true;
}

bool AdaptiveSizePolicy::print_adaptive_size_policy_on(
                                            outputStream* st,
                                            uint tenuring_threshold_arg) const {
  if (!AdaptiveSizePolicy::print_adaptive_size_policy_on(st)) {
    return false;
  }

  // Tenuring threshold
  bool tenuring_threshold_changed = true;
  if (decrement_tenuring_threshold_for_survivor_limit()) {
    st->print("    Tenuring threshold:    (attempted to decrease to avoid"
              " survivor space overflow) = ");
  } else if (decrement_tenuring_threshold_for_gc_cost()) {
    st->print("    Tenuring threshold:    (attempted to decrease to balance"
              " GC costs) = ");
  } else if (increment_tenuring_threshold_for_gc_cost()) {
    st->print("    Tenuring threshold:    (attempted to increase to balance"
              " GC costs) = ");
  } else {
    tenuring_threshold_changed = false;
    assert(!tenuring_threshold_change(), "(no change was attempted)");
  }
  if (tenuring_threshold_changed) {
    st->print_cr("%u", tenuring_threshold_arg);
  }
  return true;
}