1 /*
   2  * Copyright (c) 2004, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/adaptiveSizePolicy.hpp"
  27 #include "gc/shared/collectorPolicy.hpp"
  28 #include "gc/shared/gcCause.hpp"
  29 #include "gc/shared/gcUtil.inline.hpp"
  30 #include "gc/shared/workgroup.hpp"
  31 #include "logging/log.hpp"
  32 #include "runtime/timer.hpp"
  33 #include "utilities/ostream.hpp"
  34 
  35 elapsedTimer AdaptiveSizePolicy::_minor_timer;
  36 elapsedTimer AdaptiveSizePolicy::_major_timer;
  37 bool AdaptiveSizePolicy::_debug_perturbation = false;
  38 
  39 // The throughput goal is implemented as
  40 //      _throughput_goal = 1 - ( 1 / (1 + gc_cost_ratio))
  41 // gc_cost_ratio is the ratio
  42 //      application cost / gc cost
  43 // For example a gc_cost_ratio of 4 translates into a
  44 // throughput goal of .80
  45 
// Construct the policy from the initial generation sizes and goals.
// The throughput goal is derived from gc_cost_ratio as described in the
// comment above (e.g. gc_cost_ratio == 4 gives a goal of 0.80).
AdaptiveSizePolicy::AdaptiveSizePolicy(size_t init_eden_size,
                                       size_t init_promo_size,
                                       size_t init_survivor_size,
                                       double gc_pause_goal_sec,
                                       uint gc_cost_ratio) :
    _throughput_goal(1.0 - double(1.0 / (1.0 + (double) gc_cost_ratio))),
    _eden_size(init_eden_size),
    _promo_size(init_promo_size),
    _survivor_size(init_survivor_size),
    _latest_minor_mutator_interval_seconds(0),
    _threshold_tolerance_percent(1.0 + ThresholdTolerance/100.0),
    _gc_pause_goal_sec(gc_pause_goal_sec),
    _young_gen_change_for_minor_throughput(0),
    _old_gen_change_for_major_throughput(0) {
  assert(AdaptiveSizePolicyGCTimeLimitThreshold > 0,
    "No opportunity to clear SoftReferences before GC overhead limit");
  // Time-weighted averages of pause times, intervals and GC costs.
  _avg_minor_pause    =
    new AdaptivePaddedAverage(AdaptiveTimeWeight, PausePadding);
  _avg_minor_interval = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
  _avg_minor_gc_cost  = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
  _avg_major_gc_cost  = new AdaptiveWeightedAverage(AdaptiveTimeWeight);

  // Size-weighted averages of live data in the generations.
  _avg_young_live     = new AdaptiveWeightedAverage(AdaptiveSizePolicyWeight);
  _avg_old_live       = new AdaptiveWeightedAverage(AdaptiveSizePolicyWeight);
  _avg_eden_live      = new AdaptiveWeightedAverage(AdaptiveSizePolicyWeight);

  // Padded averages: padding biases the estimates upward so survivor
  // sizing errs on the side of more space.
  _avg_survived       = new AdaptivePaddedAverage(AdaptiveSizePolicyWeight,
                                                  SurvivorPadding);
  _avg_pretenured     = new AdaptivePaddedNoZeroDevAverage(
                                                  AdaptiveSizePolicyWeight,
                                                  SurvivorPadding);

  // Least-squares estimators of pause time / collection cost as a
  // function of generation size (see update_minor_pause_young_estimator()).
  _minor_pause_old_estimator =
    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
  _minor_pause_young_estimator =
    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
  _minor_collection_estimator =
    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
  _major_collection_estimator =
    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);

  // Start the timers
  _minor_timer.start();

  // Not ready until enough minor collections have been sampled
  // (set in minor_collection_end()).
  _young_gen_policy_is_ready = false;
}
  92 
  93 //  If the number of GC threads was set on the command line,
  94 // use it.
  95 //  Else
  96 //    Calculate the number of GC threads based on the number of Java threads.
  97 //    Calculate the number of GC threads based on the size of the heap.
  98 //    Use the larger.
  99 
 100 uint AdaptiveSizePolicy::calc_default_active_workers(uintx total_workers,
 101                                                      const uintx min_workers,
 102                                                      uintx active_workers,
 103                                                      uintx application_workers) {
 104   // If the user has specifically set the number of
 105   // GC threads, use them.
 106 
 107   // If the user has turned off using a dynamic number of GC threads
 108   // or the users has requested a specific number, set the active
 109   // number of workers to all the workers.
 110 
 111   uintx new_active_workers = total_workers;
 112   uintx prev_active_workers = active_workers;
 113   uintx active_workers_by_JT = 0;
 114   uintx active_workers_by_heap_size = 0;
 115 
 116   // Always use at least min_workers but use up to
 117   // GCThreadsPerJavaThreads * application threads.
 118   active_workers_by_JT =
 119     MAX2((uintx) GCWorkersPerJavaThread * application_workers,
 120          min_workers);
 121 
 122   // Choose a number of GC threads based on the current size
 123   // of the heap.  This may be complicated because the size of
 124   // the heap depends on factors such as the throughput goal.
 125   // Still a large heap should be collected by more GC threads.
 126   active_workers_by_heap_size =
 127       MAX2((size_t) 2U, Universe::heap()->capacity() / HeapSizePerGCThread);
 128 
 129   uintx max_active_workers =
 130     MAX2(active_workers_by_JT, active_workers_by_heap_size);
 131 
 132   new_active_workers = MIN2(max_active_workers, (uintx) total_workers);
 133 
 134   // Increase GC workers instantly but decrease them more
 135   // slowly.
 136   if (new_active_workers < prev_active_workers) {
 137     new_active_workers =
 138       MAX2(min_workers, (prev_active_workers + new_active_workers) / 2);
 139   }
 140 
 141   // Check once more that the number of workers is within the limits.
 142   assert(min_workers <= total_workers, "Minimum workers not consistent with total workers");
 143   assert(new_active_workers >= min_workers, "Minimum workers not observed");
 144   assert(new_active_workers <= total_workers, "Total workers not observed");
 145 
 146   if (ForceDynamicNumberOfGCThreads) {
 147     // Assume this is debugging and jiggle the number of GC threads.
 148     if (new_active_workers == prev_active_workers) {
 149       if (new_active_workers < total_workers) {
 150         new_active_workers++;
 151       } else if (new_active_workers > min_workers) {
 152         new_active_workers--;
 153       }
 154     }
 155     if (new_active_workers == total_workers) {
 156       if (_debug_perturbation) {
 157         new_active_workers =  min_workers;
 158       }
 159       _debug_perturbation = !_debug_perturbation;
 160     }
 161     assert((new_active_workers <= ParallelGCThreads) &&
 162            (new_active_workers >= min_workers),
 163       "Jiggled active workers too much");
 164   }
 165 
 166   log_trace(gc, task)("GCTaskManager::calc_default_active_workers() : "
 167      "active_workers(): " UINTX_FORMAT "  new_active_workers: " UINTX_FORMAT "  "
 168      "prev_active_workers: " UINTX_FORMAT "\n"
 169      " active_workers_by_JT: " UINTX_FORMAT "  active_workers_by_heap_size: " UINTX_FORMAT,
 170      active_workers, new_active_workers, prev_active_workers,
 171      active_workers_by_JT, active_workers_by_heap_size);
 172   assert(new_active_workers > 0, "Always need at least 1");
 173   return new_active_workers;
 174 }
 175 
 176 uint AdaptiveSizePolicy::calc_active_workers(uintx total_workers,
 177                                              uintx active_workers,
 178                                              uintx application_workers) {
 179   // If the user has specifically set the number of
 180   // GC threads, use them.
 181 
 182   // If the user has turned off using a dynamic number of GC threads
 183   // or the users has requested a specific number, set the active
 184   // number of workers to all the workers.
 185 
 186   uint new_active_workers;
 187   if (!UseDynamicNumberOfGCThreads ||
 188      (!FLAG_IS_DEFAULT(ParallelGCThreads) && !ForceDynamicNumberOfGCThreads)) {
 189     new_active_workers = total_workers;
 190   } else {
 191     uintx min_workers = (total_workers == 1) ? 1 : 2;
 192     new_active_workers = calc_default_active_workers(total_workers,
 193                                                      min_workers,
 194                                                      active_workers,
 195                                                      application_workers);
 196   }
 197   assert(new_active_workers > 0, "Always need at least 1");
 198   return new_active_workers;
 199 }
 200 
 201 uint AdaptiveSizePolicy::calc_active_conc_workers(uintx total_workers,
 202                                                   uintx active_workers,
 203                                                   uintx application_workers) {
 204   if (!UseDynamicNumberOfGCThreads ||
 205      (!FLAG_IS_DEFAULT(ConcGCThreads) && !ForceDynamicNumberOfGCThreads)) {
 206     return ConcGCThreads;
 207   } else {
 208     uint no_of_gc_threads = calc_default_active_workers(total_workers,
 209                                                         1, /* Minimum number of workers */
 210                                                         active_workers,
 211                                                         application_workers);
 212     return no_of_gc_threads;
 213   }
 214 }
 215 
 216 bool AdaptiveSizePolicy::tenuring_threshold_change() const {
 217   return decrement_tenuring_threshold_for_gc_cost() ||
 218          increment_tenuring_threshold_for_gc_cost() ||
 219          decrement_tenuring_threshold_for_survivor_limit();
 220 }
 221 
// Mark the start of a minor collection.  The timer has been running since
// the end of the previous minor collection (or construction), so its value
// here is the mutator interval; record it, then restart the timer so it
// next measures the pause itself.
void AdaptiveSizePolicy::minor_collection_begin() {
  // Update the interval time
  _minor_timer.stop();
  // Save the most recent mutator interval (time since the last minor GC
  // ended), used for the GC-cost calculation in minor_collection_end().
  _latest_minor_mutator_interval_seconds = _minor_timer.seconds();
  _minor_timer.reset();
  _minor_timer.start();
}
 230 
 231 void AdaptiveSizePolicy::update_minor_pause_young_estimator(
 232     double minor_pause_in_ms) {
 233   double eden_size_in_mbytes = ((double)_eden_size)/((double)M);
 234   _minor_pause_young_estimator->update(eden_size_in_mbytes,
 235     minor_pause_in_ms);
 236 }
 237 
// Mark the end of a minor collection: sample the pause time, interval and
// GC cost, update the pause/cost estimators, then restart the timer so it
// next measures the mutator interval.
void AdaptiveSizePolicy::minor_collection_end(GCCause::Cause gc_cause) {
  // Update the pause time.
  _minor_timer.stop();

  // User-requested collections (e.g. System.gc()) are excluded from the
  // statistics unless UseAdaptiveSizePolicyWithSystemGC is set, since
  // they would skew the pause/interval samples.
  if (!GCCause::is_user_requested_gc(gc_cause) ||
      UseAdaptiveSizePolicyWithSystemGC) {
    double minor_pause_in_seconds = _minor_timer.seconds();
    double minor_pause_in_ms = minor_pause_in_seconds * MILLIUNITS;

    // Sample for performance counter
    _avg_minor_pause->sample(minor_pause_in_seconds);

    // Cost of collection (unit-less)
    double collection_cost = 0.0;
    if ((_latest_minor_mutator_interval_seconds > 0.0) &&
        (minor_pause_in_seconds > 0.0)) {
      // Full interval = mutator time since the last minor GC + this pause.
      double interval_in_seconds =
        _latest_minor_mutator_interval_seconds + minor_pause_in_seconds;
      // Fraction of the interval spent collecting.
      collection_cost =
        minor_pause_in_seconds / interval_in_seconds;
      _avg_minor_gc_cost->sample(collection_cost);
      // Sample for performance counter
      _avg_minor_interval->sample(interval_in_seconds);
    }

    // The policy does not have enough data until at least some
    // young collections have been done.
    _young_gen_policy_is_ready =
      (_avg_minor_gc_cost->count() >= AdaptiveSizePolicyReadyThreshold);

    // Calculate variables used to estimate pause time vs. gen sizes
    double eden_size_in_mbytes = ((double)_eden_size) / ((double)M);
    update_minor_pause_young_estimator(minor_pause_in_ms);
    update_minor_pause_old_estimator(minor_pause_in_ms);

    log_trace(gc, ergo)("AdaptiveSizePolicy::minor_collection_end: minor gc cost: %f  average: %f",
                        collection_cost, _avg_minor_gc_cost->average());
    log_trace(gc, ergo)("  minor pause: %f minor period %f",
                        minor_pause_in_ms, _latest_minor_mutator_interval_seconds * MILLIUNITS);

    // Calculate variable used to estimate collection cost vs. gen sizes
    assert(collection_cost >= 0.0, "Expected to be non-negative");
    _minor_collection_estimator->update(eden_size_in_mbytes, collection_cost);
  }

  // Interval times use this timer to measure the mutator time.
  // Reset the timer after the GC pause.
  _minor_timer.reset();
  _minor_timer.start();
}
 288 
 289 size_t AdaptiveSizePolicy::eden_increment(size_t cur_eden, uint percent_change) {
 290   size_t eden_heap_delta;
 291   eden_heap_delta = cur_eden / 100 * percent_change;
 292   return eden_heap_delta;
 293 }
 294 
// Eden growth step using the default YoungGenerationSizeIncrement percentage.
size_t AdaptiveSizePolicy::eden_increment(size_t cur_eden) {
  return eden_increment(cur_eden, YoungGenerationSizeIncrement);
}
 298 
 299 size_t AdaptiveSizePolicy::eden_decrement(size_t cur_eden) {
 300   size_t eden_heap_delta = eden_increment(cur_eden) /
 301     AdaptiveSizeDecrementScaleFactor;
 302   return eden_heap_delta;
 303 }
 304 
 305 size_t AdaptiveSizePolicy::promo_increment(size_t cur_promo, uint percent_change) {
 306   size_t promo_heap_delta;
 307   promo_heap_delta = cur_promo / 100 * percent_change;
 308   return promo_heap_delta;
 309 }
 310 
// Promo growth step using the default TenuredGenerationSizeIncrement percentage.
size_t AdaptiveSizePolicy::promo_increment(size_t cur_promo) {
  return promo_increment(cur_promo, TenuredGenerationSizeIncrement);
}
 314 
 315 size_t AdaptiveSizePolicy::promo_decrement(size_t cur_promo) {
 316   size_t promo_heap_delta = promo_increment(cur_promo);
 317   promo_heap_delta = promo_heap_delta / AdaptiveSizeDecrementScaleFactor;
 318   return promo_heap_delta;
 319 }
 320 
// Elapsed time (seconds) accumulated by the major timer, i.e. time since
// the last major collection.  _major_timer is a static member, which is
// why this const method can stop it (to read an up-to-date value) and
// restart it so it keeps accumulating.
double AdaptiveSizePolicy::time_since_major_gc() const {
  _major_timer.stop();
  double result = _major_timer.seconds();
  _major_timer.start();
  return result;
}
 327 
 328 // Linear decay of major gc cost
// Linear decay of major gc cost
// Scales the averaged major gc cost by
//   (AdaptiveSizeMajorGCDecayTimeScale * avg-major-interval) / time-since-major-gc
// so a stale major-gc cost fades as time passes without a major collection.
double AdaptiveSizePolicy::decaying_major_gc_cost() const {
  double major_interval = major_gc_interval_average_for_decay();
  double major_gc_cost_average = major_gc_cost();
  double decayed_major_gc_cost = major_gc_cost_average;
  // time_since_major_gc() is read twice (guard and divisor).  The timer
  // keeps running between the reads, so the divisor may be slightly larger
  // than the guarded value but is > 0 whenever the guard passes.
  if(time_since_major_gc() > 0.0) {
    decayed_major_gc_cost = major_gc_cost() *
      (((double) AdaptiveSizeMajorGCDecayTimeScale) * major_interval)
      / time_since_major_gc();
  }

  // The decayed cost should always be smaller than the
  // average cost but the vagaries of finite arithmetic could
  // produce a larger value in decayed_major_gc_cost so protect
  // against that.
  return MIN2(major_gc_cost_average, decayed_major_gc_cost);
}
 345 
 346 // Use a value of the major gc cost that has been decayed
 347 // by the factor
 348 //
 349 //      average-interval-between-major-gc * AdaptiveSizeMajorGCDecayTimeScale /
 350 //        time-since-last-major-gc
 351 //
 352 // if the average-interval-between-major-gc * AdaptiveSizeMajorGCDecayTimeScale
 353 // is less than time-since-last-major-gc.
 354 //
 355 // In cases where there are initial major gc's that
 356 // are of a relatively high cost but no later major
 357 // gc's, the total gc cost can remain high because
 358 // the major gc cost remains unchanged (since there are no major
 359 // gc's).  In such a situation the value of the unchanging
 360 // major gc cost can keep the mutator throughput below
 361 // the goal when in fact the major gc cost is becoming diminishingly
 362 // small.  Use the decaying gc cost only to decide whether to
// adjust for throughput.  Using it also to determine the adjustment
// to be made for throughput seems reasonable, but there is
// no test case to use to decide whether it is the right thing to do,
// so don't do it yet.
 367 
// Total GC cost (minor + major), capped at 1.0.  The major component is
// replaced by its decayed value (see decaying_major_gc_cost() and the
// comment above) only when decay is enabled and enough time has passed
// since the last major collection.
double AdaptiveSizePolicy::decaying_gc_cost() const {
  double decayed_major_gc_cost = major_gc_cost();
  double avg_major_interval = major_gc_interval_average_for_decay();
  if (UseAdaptiveSizeDecayMajorGCCost &&
      (AdaptiveSizeMajorGCDecayTimeScale > 0) &&
      (avg_major_interval > 0.00)) {
    double time_since_last_major_gc = time_since_major_gc();

    // Decay the major gc cost?
    if (time_since_last_major_gc >
        ((double) AdaptiveSizeMajorGCDecayTimeScale) * avg_major_interval) {

      // Decay using the time-since-last-major-gc
      decayed_major_gc_cost = decaying_major_gc_cost();
      log_trace(gc, ergo)("decaying_gc_cost: major interval average: %f  time since last major gc: %f",
                    avg_major_interval, time_since_last_major_gc);
      log_trace(gc, ergo)("  major gc cost: %f  decayed major gc cost: %f",
                    major_gc_cost(), decayed_major_gc_cost);
    }
  }
  double result = MIN2(1.0, decayed_major_gc_cost + minor_gc_cost());
  return result;
}
 391 
 392 
 393 void AdaptiveSizePolicy::clear_generation_free_space_flags() {
 394   set_change_young_gen_for_min_pauses(0);
 395   set_change_old_gen_for_maj_pauses(0);
 396 
 397   set_change_old_gen_for_throughput(0);
 398   set_change_young_gen_for_throughput(0);
 399   set_decrease_for_footprint(0);
 400   set_decide_at_full_gc(0);
 401 }
 402 
 403 class AdaptiveSizePolicyTimeOverheadTester: public OverheadTester {
 404   double _gc_cost;
 405 
 406  public:
 407   AdaptiveSizePolicyTimeOverheadTester(double gc_cost) : _gc_cost(gc_cost) {}
 408 
 409   bool is_exceeded() {
 410     // Note that the gc time limit test only works for the collections
 411     // of the young gen + tenured gen and not for collections of the
 412     // permanent gen.  That is because the calculation of the space
 413     // freed by the collection is the free space in the young gen +
 414     // tenured gen.
 415     return _gc_cost > (GCTimeLimit / 100.0);
 416   }
 417 };
 418 
// Tests whether the space being reclaimed by collections has fallen below
// GCHeapFreeLimit percent in both eden and the old generation.
class AdaptiveSizePolicySpaceOverheadTester: public OverheadTester {
  size_t _eden_live;         // live bytes in eden after the last collection
  size_t _max_old_gen_size;  // maximum size of the old generation
  size_t _max_eden_size;     // upper limit on eden size (see is_exceeded())
  size_t _promo_size;        // current promo size (floor for promo_limit)
  double _avg_eden_live;     // weighted average of live bytes in eden
  double _avg_old_live;      // weighted average of live bytes in old gen

 public:
  AdaptiveSizePolicySpaceOverheadTester(size_t eden_live,
                                        size_t max_old_gen_size,
                                        size_t max_eden_size,
                                        size_t promo_size,
                                        double avg_eden_live,
                                        double avg_old_live) :
    _eden_live(eden_live),
    _max_old_gen_size(max_old_gen_size),
    _max_eden_size(max_eden_size),
    _promo_size(promo_size),
    _avg_eden_live(avg_eden_live),
    _avg_old_live(avg_old_live) {}

  bool is_exceeded() {
    // _max_eden_size is the upper limit on the size of eden based on
    // the maximum size of the young generation and the sizes
    // of the survivor space.
    // The question being asked is whether the space being recovered by
    // a collection is low.
    // free_in_eden is the free space in eden after a collection and
    // free_in_old_gen is the free space in the old generation after
    // a collection.
    //
    // Use the minimum of the current value of the live in eden
    // or the average of the live in eden.
    // If the current value drops quickly, that should be taken
    // into account (i.e., don't trigger if the amount of free
    // space has suddenly jumped up).  If the current is much
    // higher than the average, use the average since it represents
    // the longer term behavior.
    const size_t live_in_eden =
      MIN2(_eden_live, (size_t)_avg_eden_live);
    const size_t free_in_eden = _max_eden_size > live_in_eden ?
      _max_eden_size - live_in_eden : 0;
    // NOTE(review): this subtraction assumes _avg_old_live <= _max_old_gen_size;
    // unlike free_in_eden above, it is not guarded against wrap-around —
    // confirm callers guarantee the invariant.
    const size_t free_in_old_gen = (size_t)(_max_old_gen_size - _avg_old_live);
    const size_t total_free_limit = free_in_old_gen + free_in_eden;
    const size_t total_mem = _max_old_gen_size + _max_eden_size;
    const double free_limit_ratio = GCHeapFreeLimit / 100.0;
    const double mem_free_limit = total_mem * free_limit_ratio;
    const double mem_free_old_limit = _max_old_gen_size * free_limit_ratio;
    const double mem_free_eden_limit = _max_eden_size * free_limit_ratio;
    size_t promo_limit = (size_t)(_max_old_gen_size - _avg_old_live);
    // But don't force a promo size below the current promo size. Otherwise,
    // the promo size will shrink for no good reason.
    promo_limit = MAX2(promo_limit, _promo_size);

    log_trace(gc, ergo)(
          "AdaptiveSizePolicySpaceOverheadTester::is_exceeded:"
          " promo_limit: " SIZE_FORMAT
          " max_eden_size: " SIZE_FORMAT
          " total_free_limit: " SIZE_FORMAT
          " max_old_gen_size: " SIZE_FORMAT
          " max_eden_size: " SIZE_FORMAT
          " mem_free_limit: " SIZE_FORMAT,
          promo_limit, _max_eden_size, total_free_limit,
          _max_old_gen_size, _max_eden_size,
          (size_t)mem_free_limit);

    // The limit is exceeded only when free space is scarce in BOTH
    // generations.
    return free_in_old_gen < (size_t)mem_free_old_limit &&
           free_in_eden < (size_t)mem_free_eden_limit;
  }

};
 491 
// Check whether the GC overhead limit has been reached: too much time
// spent in GC (time criterion) together with too little space reclaimed
// (space criterion).  The decision itself is delegated to
// _overhead_checker, which may clear SoftReferences or signal an
// OutOfMemoryError via the soft_ref_policy.
void AdaptiveSizePolicy::check_gc_overhead_limit(
                                          size_t eden_live,
                                          size_t max_old_gen_size,
                                          size_t max_eden_size,
                                          bool   is_full_gc,
                                          GCCause::Cause gc_cause,
                                          SoftRefPolicy* soft_ref_policy) {

  // Time criterion: current GC cost vs GCTimeLimit.
  AdaptiveSizePolicyTimeOverheadTester time_overhead(gc_cost());
  // Space criterion: free space in eden and old gen vs GCHeapFreeLimit.
  AdaptiveSizePolicySpaceOverheadTester space_overhead(eden_live,
                                                       max_old_gen_size,
                                                       max_eden_size,
                                                       _promo_size,
                                                       avg_eden_live()->average(),
                                                       avg_old_live()->average());
  _overhead_checker.check_gc_overhead_limit(&time_overhead,
                                            &space_overhead,
                                            is_full_gc,
                                            gc_cause,
                                            soft_ref_policy);
}
 513 // Printing
 514 
 515 bool AdaptiveSizePolicy::print() const {
 516   assert(UseAdaptiveSizePolicy, "UseAdaptiveSizePolicy need to be enabled.");
 517 
 518   if (!log_is_enabled(Debug, gc, ergo)) {
 519     return false;
 520   }
 521 
 522   // Print goal for which action is needed.
 523   char* action = NULL;
 524   bool change_for_pause = false;
 525   if ((change_old_gen_for_maj_pauses() ==
 526          decrease_old_gen_for_maj_pauses_true) ||
 527       (change_young_gen_for_min_pauses() ==
 528          decrease_young_gen_for_min_pauses_true)) {
 529     action = (char*) " *** pause time goal ***";
 530     change_for_pause = true;
 531   } else if ((change_old_gen_for_throughput() ==
 532                increase_old_gen_for_throughput_true) ||
 533             (change_young_gen_for_throughput() ==
 534                increase_young_gen_for_througput_true)) {
 535     action = (char*) " *** throughput goal ***";
 536   } else if (decrease_for_footprint()) {
 537     action = (char*) " *** reduced footprint ***";
 538   } else {
 539     // No actions were taken.  This can legitimately be the
 540     // situation if not enough data has been gathered to make
 541     // decisions.
 542     return false;
 543   }
 544 
 545   // Pauses
 546   // Currently the size of the old gen is only adjusted to
 547   // change the major pause times.
 548   char* young_gen_action = NULL;
 549   char* tenured_gen_action = NULL;
 550 
 551   char* shrink_msg = (char*) "(attempted to shrink)";
 552   char* grow_msg = (char*) "(attempted to grow)";
 553   char* no_change_msg = (char*) "(no change)";
 554   if (change_young_gen_for_min_pauses() ==
 555       decrease_young_gen_for_min_pauses_true) {
 556     young_gen_action = shrink_msg;
 557   } else if (change_for_pause) {
 558     young_gen_action = no_change_msg;
 559   }
 560 
 561   if (change_old_gen_for_maj_pauses() == decrease_old_gen_for_maj_pauses_true) {
 562     tenured_gen_action = shrink_msg;
 563   } else if (change_for_pause) {
 564     tenured_gen_action = no_change_msg;
 565   }
 566 
 567   // Throughput
 568   if (change_old_gen_for_throughput() == increase_old_gen_for_throughput_true) {
 569     assert(change_young_gen_for_throughput() ==
 570            increase_young_gen_for_througput_true,
 571            "Both generations should be growing");
 572     young_gen_action = grow_msg;
 573     tenured_gen_action = grow_msg;
 574   } else if (change_young_gen_for_throughput() ==
 575              increase_young_gen_for_througput_true) {
 576     // Only the young generation may grow at start up (before
 577     // enough full collections have been done to grow the old generation).
 578     young_gen_action = grow_msg;
 579     tenured_gen_action = no_change_msg;
 580   }
 581 
 582   // Minimum footprint
 583   if (decrease_for_footprint() != 0) {
 584     young_gen_action = shrink_msg;
 585     tenured_gen_action = shrink_msg;
 586   }
 587 
 588   log_debug(gc, ergo)("UseAdaptiveSizePolicy actions to meet %s", action);
 589   log_debug(gc, ergo)("                       GC overhead (%%)");
 590   log_debug(gc, ergo)("    Young generation:     %7.2f\t  %s",
 591                       100.0 * avg_minor_gc_cost()->average(), young_gen_action);
 592   log_debug(gc, ergo)("    Tenured generation:   %7.2f\t  %s",
 593                       100.0 * avg_major_gc_cost()->average(), tenured_gen_action);
 594   return true;
 595 }
 596 
// Log (at Debug level, gc+ergo) which tenuring-threshold adjustment was
// attempted, checked in priority order: survivor-space overflow first,
// then the two GC-cost balancing adjustments.
void AdaptiveSizePolicy::print_tenuring_threshold( uint new_tenuring_threshold_arg) const {
  // Tenuring threshold
  if (decrement_tenuring_threshold_for_survivor_limit()) {
    log_debug(gc, ergo)("Tenuring threshold: (attempted to decrease to avoid survivor space overflow) = %u", new_tenuring_threshold_arg);
  } else if (decrement_tenuring_threshold_for_gc_cost()) {
    log_debug(gc, ergo)("Tenuring threshold: (attempted to decrease to balance GC costs) = %u", new_tenuring_threshold_arg);
  } else if (increment_tenuring_threshold_for_gc_cost()) {
    log_debug(gc, ergo)("Tenuring threshold: (attempted to increase to balance GC costs) = %u", new_tenuring_threshold_arg);
  } else {
    // No individual flag is set; cross-check against the aggregate query.
    assert(!tenuring_threshold_change(), "(no change was attempted)");
  }
}