/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/generationSpec.hpp"
#include "memory/space.hpp"
#include "memory/universe.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "thread_bsd.inline.hpp"
#endif
#ifndef SERIALGC
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
#endif

// CollectorPolicy methods.

void CollectorPolicy::initialize_flags() {
  if (MetaspaceSize > MaxMetaspaceSize) {
    MaxMetaspaceSize = MetaspaceSize;
  }
  MetaspaceSize = MAX2(min_alignment(), align_size_down_(MetaspaceSize, min_alignment()));
  // Don't increase Metaspace size limit above specified.
  MaxMetaspaceSize = align_size_down(MaxMetaspaceSize, max_alignment());
  if (MetaspaceSize > MaxMetaspaceSize) {
    MetaspaceSize = MaxMetaspaceSize;
  }
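  // Illustrative example (actual alignment values are collector dependent):
  // with a 64K min_alignment(), -XX:MetaspaceSize=21m stays at 21M because it
  // is already 64K aligned, while an unaligned value is rounded down to the
  // nearest 64K multiple; MaxMetaspaceSize is likewise rounded down to a
  // max_alignment() multiple, and MetaspaceSize is then capped by it.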

  MinMetaspaceExpansion = MAX2(min_alignment(), align_size_down_(MinMetaspaceExpansion, min_alignment()));
  MaxMetaspaceExpansion = MAX2(min_alignment(), align_size_down_(MaxMetaspaceExpansion, min_alignment()));

  MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, min_alignment());

  assert(MetaspaceSize    % min_alignment() == 0, "metaspace alignment");
  assert(MaxMetaspaceSize % max_alignment() == 0, "maximum metaspace alignment");
  if (MetaspaceSize < 256*K) {
    vm_exit_during_initialization("Too small initial Metaspace size");
  }
}

void CollectorPolicy::initialize_size_info() {
  // User inputs from -Xmx and -Xms are aligned
  set_initial_heap_byte_size(InitialHeapSize);
  if (initial_heap_byte_size() == 0) {
    set_initial_heap_byte_size(NewSize + OldSize);
  }
  set_initial_heap_byte_size(align_size_up(_initial_heap_byte_size,
                                           min_alignment()));

  set_min_heap_byte_size(Arguments::min_heap_size());
  if (min_heap_byte_size() == 0) {
    set_min_heap_byte_size(NewSize + OldSize);
  }
  set_min_heap_byte_size(align_size_up(_min_heap_byte_size,
                                       min_alignment()));

  set_max_heap_byte_size(align_size_up(MaxHeapSize, max_alignment()));

  // Check heap parameter properties
  if (initial_heap_byte_size() < M) {
    vm_exit_during_initialization("Too small initial heap");
  }
  // Check heap parameter properties
  if (min_heap_byte_size() < M) {
    vm_exit_during_initialization("Too small minimum heap");
  }
  if (initial_heap_byte_size() <= NewSize) {
     // make sure there is at least some room in old space
    vm_exit_during_initialization("Too small initial heap for new size specified");
  }
  if (max_heap_byte_size() < min_heap_byte_size()) {
    vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified");
  }
  if (initial_heap_byte_size() < min_heap_byte_size()) {
    vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
  }
  if (max_heap_byte_size() < initial_heap_byte_size()) {
    vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
  }

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum heap " SIZE_FORMAT "  Initial heap "
      SIZE_FORMAT "  Maximum heap " SIZE_FORMAT,
      min_heap_byte_size(), initial_heap_byte_size(), max_heap_byte_size());
  }
}

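// Return the current value of _should_clear_all_soft_refs and reset the flag
// to false.  Note that the boolean argument is currently unused.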
bool CollectorPolicy::use_should_clear_all_soft_refs(bool v) {
  bool result = _should_clear_all_soft_refs;
  set_should_clear_all_soft_refs(false);
  return result;
}

GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap,
                                           int max_covered_regions) {
  switch (rem_set_name()) {
  case GenRemSet::CardTable: {
    CardTableRS* res = new CardTableRS(whole_heap, max_covered_regions);
    return res;
  }
  default:
    guarantee(false, "unrecognized GenRemSet::Name");
    return NULL;
  }
}

void CollectorPolicy::cleared_all_soft_refs() {
  // If near the gc overhead limit, continue to clear SoftRefs.  SoftRefs may
  // have been cleared in the last collection but if the gc overhead
  // limit continues to be near, SoftRefs should still be cleared.
  if (size_policy() != NULL) {
    _should_clear_all_soft_refs = size_policy()->gc_overhead_limit_near();
  }
  _all_soft_refs_clear = true;
}


// GenCollectorPolicy methods.

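// Size the young generation as base_size / (NewRatio + 1), rounded down to
// min_alignment() (never below one alignment unit).  For example, with the
// default NewRatio of 2, a 768M base size yields a 256M young generation
// before alignment.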
size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
  size_t x = base_size / (NewRatio+1);
  size_t new_gen_size = x > min_alignment() ?
                     align_size_down(x, min_alignment()) :
                     min_alignment();
  return new_gen_size;
}

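// Return the smaller of desired_size and (maximum_size - min_alignment()),
// leaving at least one alignment unit of headroom below maximum_size.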
size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size,
                                                 size_t maximum_size) {
  size_t alignment = min_alignment();
  size_t max_minus = maximum_size - alignment;
  return desired_size < max_minus ? desired_size : max_minus;
}


void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size,
                                                size_t init_promo_size,
                                                size_t init_survivor_size) {
  const double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;
  _size_policy = new AdaptiveSizePolicy(init_eden_size,
                                        init_promo_size,
                                        init_survivor_size,
                                        max_gc_minor_pause_sec,
                                        GCTimeRatio);
}

size_t GenCollectorPolicy::compute_max_alignment() {
  // The card marking array and the offset arrays for old generations are
  // committed in os pages as well. Make sure they are entirely full (to
  // avoid partial page problems), e.g. if 512 bytes of heap correspond to
  // 1 byte of card table entry and the os page size is 4096, the maximum
  // heap size should be aligned to 512*4096 = 2MB.
  size_t alignment = GenRemSet::max_alignment_constraint(rem_set_name());

  // Parallel GC does its own alignment of the generations to avoid requiring a
  // large page (256M on some platforms) for the permanent generation.  The
  // other collectors should also be updated to do their own alignment and then
  // this use of lcm() should be removed.
  if (UseLargePages && !UseParallelGC) {
      // In the presence of large pages we have to make sure that our
      // alignment is large-page aware.
      alignment = lcm(os::large_page_size(), alignment);
  }

  return alignment;
}

void GenCollectorPolicy::initialize_flags() {
  // All sizes must be multiples of the generation granularity.
  set_min_alignment((uintx) Generation::GenGrain);
  set_max_alignment(compute_max_alignment());
  assert(max_alignment() >= min_alignment() &&
         max_alignment() % min_alignment() == 0,
         "invalid alignment constraints");

  CollectorPolicy::initialize_flags();

  // All generational heaps have a youngest gen; handle those flags here.

  // Adjust max size parameters
  if (NewSize > MaxNewSize) {
    MaxNewSize = NewSize;
  }
  NewSize = align_size_down(NewSize, min_alignment());
  MaxNewSize = align_size_down(MaxNewSize, min_alignment());

  // Check validity of heap flags
  assert(NewSize     % min_alignment() == 0, "eden space alignment");
  assert(MaxNewSize  % min_alignment() == 0, "survivor space alignment");

  if (NewSize < 3*min_alignment()) {
     // make sure there is room for eden and two survivor spaces
    vm_exit_during_initialization("Too small new size specified");
  }
  if (SurvivorRatio < 1 || NewRatio < 1) {
    vm_exit_during_initialization("Invalid heap ratio specified");
  }
}

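// Complete flag initialization for a two-generation heap: align OldSize,
// grow MaxHeapSize if the requested NewSize + OldSize exceed it, and turn on
// the always-update write barrier when CMS is in use.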
void TwoGenerationCollectorPolicy::initialize_flags() {
  GenCollectorPolicy::initialize_flags();

  OldSize = align_size_down(OldSize, min_alignment());
  if (NewSize + OldSize > MaxHeapSize) {
    MaxHeapSize = NewSize + OldSize;
  }
  MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());

  always_do_update_barrier = UseConcMarkSweepGC;

  // Check validity of heap flags
  assert(OldSize     % min_alignment() == 0, "old space alignment");
  assert(MaxHeapSize % max_alignment() == 0, "maximum heap alignment");
}

// Values set on the command line win over any ergonomically
// set command line parameters.
// Ergonomic choices of parameters are made before this
// method is called.  Values for command line parameters such as NewSize
// and MaxNewSize feed those ergonomic choices into this method.
// This method makes the final generation sizings consistent with
// themselves and with the overall heap sizing.
// In the absence of explicitly set command line flags, policies
// such as the use of NewRatio are used to size the generations.
void GenCollectorPolicy::initialize_size_info() {
  CollectorPolicy::initialize_size_info();

  // min_alignment() is used for alignment within a generation.
  // There is additional alignment done downstream for some
  // collectors that sometimes causes unwanted rounding up of
  // generation sizes.

  // Determine maximum size of gen0

  size_t max_new_size = 0;
  if (FLAG_IS_CMDLINE(MaxNewSize) || FLAG_IS_ERGO(MaxNewSize)) {
    if (MaxNewSize < min_alignment()) {
      max_new_size = min_alignment();
    }
    if (MaxNewSize >= max_heap_byte_size()) {
      max_new_size = align_size_down(max_heap_byte_size() - min_alignment(),
                                     min_alignment());
      warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or "
        "greater than the entire heap (" SIZE_FORMAT "k).  A "
        "new generation size of " SIZE_FORMAT "k will be used.",
        MaxNewSize/K, max_heap_byte_size()/K, max_new_size/K);
    } else {
      max_new_size = align_size_down(MaxNewSize, min_alignment());
    }

  // The case for FLAG_IS_ERGO(MaxNewSize) could be treated
  // specially at this point to just use an ergonomically set
  // MaxNewSize to set max_new_size.  For cases with small
  // heaps such a policy often did not work because the MaxNewSize
  // was larger than the entire heap.  The interpretation given
  // to ergonomically set flags is that the flags are set
  // by different collectors for their own special needs but
  // are not allowed to badly shape the heap.  This allows the
  // different collectors to decide what's best for themselves
  // without having to factor in the overall heap shape.  It
  // can be the case in the future that the collectors would
  // only make "wise" ergonomics choices and this policy could
  // just accept those choices.  The choices currently made are
  // not always "wise".
  } else {
    max_new_size = scale_by_NewRatio_aligned(max_heap_byte_size());
    // Bound the maximum size by NewSize below (since it historically
    // would have been NewSize and because the NewRatio calculation could
    // yield a size that is too small) and bound it by MaxNewSize above.
    // Ergonomics plays a part here by previously calculating the desired
    // NewSize and MaxNewSize.
    max_new_size = MIN2(MAX2(max_new_size, NewSize), MaxNewSize);
  }
  assert(max_new_size > 0, "All paths should set max_new_size");

  // Given the maximum gen0 size, determine the initial and
  // minimum gen0 sizes.

  if (max_heap_byte_size() == min_heap_byte_size()) {
    // The maximum and minimum heap sizes are the same so
    // the generation's minimum and initial sizes must be the
    // same as its maximum.
    set_min_gen0_size(max_new_size);
    set_initial_gen0_size(max_new_size);
    set_max_gen0_size(max_new_size);
  } else {
    size_t desired_new_size = 0;
    if (!FLAG_IS_DEFAULT(NewSize)) {
      // If NewSize is set ergonomically (for example by cms), it
      // would make sense to use it.  If it is used, also use it
      // to set the initial size.  Although there is no reason
      // the minimum size and the initial size have to be the same,
      // the current implementation gets into trouble during the calculation
      // of the tenured generation sizes if they are different.
      // Note that this makes the initial size and the minimum size
      // generally small compared to the NewRatio calculation.
      _min_gen0_size = NewSize;
      desired_new_size = NewSize;
      max_new_size = MAX2(max_new_size, NewSize);
    } else {
      // For the case where NewSize is the default, use NewRatio
      // to size the minimum and initial generation sizes.
      // Use the default NewSize as the floor for these values.  If
      // NewRatio is overly large, the resulting sizes can be too
      // small.
      _min_gen0_size = MAX2(scale_by_NewRatio_aligned(min_heap_byte_size()),
                          NewSize);
      desired_new_size =
        MAX2(scale_by_NewRatio_aligned(initial_heap_byte_size()),
             NewSize);
    }

    assert(_min_gen0_size > 0, "Sanity check");
    set_initial_gen0_size(desired_new_size);
    set_max_gen0_size(max_new_size);

    // At this point the desirable initial and minimum sizes have been
    // determined without regard to the maximum sizes.

    // Bound the sizes by the corresponding overall heap sizes.
    set_min_gen0_size(
      bound_minus_alignment(_min_gen0_size, min_heap_byte_size()));
    set_initial_gen0_size(
      bound_minus_alignment(_initial_gen0_size, initial_heap_byte_size()));
    set_max_gen0_size(
      bound_minus_alignment(_max_gen0_size, max_heap_byte_size()));

    // At this point all three sizes have been checked against the
    // maximum sizes but have not been checked for consistency
    // among the three.

    // Final check min <= initial <= max
    set_min_gen0_size(MIN2(_min_gen0_size, _max_gen0_size));
    set_initial_gen0_size(
      MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size));
    set_min_gen0_size(MIN2(_min_gen0_size, _initial_gen0_size));
  }

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("1: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
      SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
      min_gen0_size(), initial_gen0_size(), max_gen0_size());
  }
}

// Call this method during the sizing of the gen1 to make
// adjustments to gen0 because of gen1 sizing policy.  gen0 initially has
// the most freedom in sizing because it is done before the
// policy for gen1 is applied.  Once gen1 policies have been applied,
// there may be conflicts in the shape of the heap and this method
// is used to make the needed adjustments.  The application of the
// policies could be more sophisticated (iterative for example) but
// keeping it simple also seems a worthwhile goal.
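// Note: despite its name, the min_gen0_size parameter receives OldSize at
// both call sites below, so when the requested sizes do not fit, gen0 is
// trimmed to heap_size - OldSize to make room for the explicitly specified
// old generation.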
bool TwoGenerationCollectorPolicy::adjust_gen0_sizes(size_t* gen0_size_ptr,
                                                     size_t* gen1_size_ptr,
                                                     size_t heap_size,
                                                     size_t min_gen0_size) {
  bool result = false;
  if ((*gen1_size_ptr + *gen0_size_ptr) > heap_size) {
    if (((*gen0_size_ptr + OldSize) > heap_size) &&
       (heap_size - min_gen0_size) >= min_alignment()) {
      // Adjust gen0 down to accommodate OldSize
      *gen0_size_ptr = heap_size - min_gen0_size;
      *gen0_size_ptr =
        MAX2((uintx)align_size_down(*gen0_size_ptr, min_alignment()),
             min_alignment());
      assert(*gen0_size_ptr > 0, "Min gen0 is too large");
      result = true;
    } else {
      *gen1_size_ptr = heap_size - *gen0_size_ptr;
      *gen1_size_ptr =
        MAX2((uintx)align_size_down(*gen1_size_ptr, min_alignment()),
                       min_alignment());
    }
  }
  return result;
}

// Minimum sizes of the generations may be different than
// the initial sizes.  An inconsistency is permitted here
// between the total size specified explicitly on the command
// line via OldSize and NewSize and a command line specification
// of -Xms.  Issue a warning but allow the values to pass.

void TwoGenerationCollectorPolicy::initialize_size_info() {
  GenCollectorPolicy::initialize_size_info();

  // At this point the minimum, initial and maximum sizes
  // of the overall heap and of gen0 have been determined.
  // The maximum gen1 size can be determined from the maximum gen0
  // and maximum heap size since no explicit flags exist
  // for setting the gen1 maximum.
  _max_gen1_size = max_heap_byte_size() - _max_gen0_size;
  _max_gen1_size =
    MAX2((uintx)align_size_down(_max_gen1_size, min_alignment()),
         min_alignment());
  // If no explicit command line flag has been set for the
  // gen1 size, use what is left for gen1.
  if (FLAG_IS_DEFAULT(OldSize) || FLAG_IS_ERGO(OldSize)) {
    // The user has not specified any value or ergonomics
    // has chosen a value (which may or may not be consistent
    // with the overall heap size).  In either case make
    // the minimum, maximum and initial sizes consistent
    // with the gen0 sizes and the overall heap sizes.
    assert(min_heap_byte_size() > _min_gen0_size,
      "gen0 has an unexpected minimum size");
    set_min_gen1_size(min_heap_byte_size() - min_gen0_size());
    set_min_gen1_size(
      MAX2((uintx)align_size_down(_min_gen1_size, min_alignment()),
           min_alignment()));
    set_initial_gen1_size(initial_heap_byte_size() - initial_gen0_size());
    set_initial_gen1_size(
      MAX2((uintx)align_size_down(_initial_gen1_size, min_alignment()),
           min_alignment()));

  } else {
    // It's been explicitly set on the command line.  Use the
    // OldSize and then determine the consequences.
    set_min_gen1_size(OldSize);
    set_initial_gen1_size(OldSize);

    // If the user has explicitly set an OldSize that is inconsistent
    // with other command line flags, issue a warning.
    // The generation minimums and the overall heap minimum should
    // be within one heap alignment.
    if ((_min_gen1_size + _min_gen0_size + min_alignment()) <
           min_heap_byte_size()) {
      warning("Inconsistency between minimum heap size and minimum "
          "generation sizes: using minimum heap = " SIZE_FORMAT,
          min_heap_byte_size());
    }
    if ((OldSize > _max_gen1_size)) {
      warning("Inconsistency between maximum heap size and maximum "
          "generation sizes: using maximum heap = " SIZE_FORMAT
          " -XX:OldSize flag is being ignored",
          max_heap_byte_size());
    }
    // If there is an inconsistency between the OldSize and the minimum and/or
    // initial size of gen0, since OldSize was explicitly set, OldSize wins.
    if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size,
                          min_heap_byte_size(), OldSize)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
              SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
              min_gen0_size(), initial_gen0_size(), max_gen0_size());
      }
    }
    // Initial size
    if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
                         initial_heap_byte_size(), OldSize)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
          SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
          min_gen0_size(), initial_gen0_size(), max_gen0_size());
      }
    }
  }
  // Enforce the maximum gen1 size.
  set_min_gen1_size(MIN2(_min_gen1_size, _max_gen1_size));

  // Check that min gen1 <= initial gen1 <= max gen1
  set_initial_gen1_size(MAX2(_initial_gen1_size, _min_gen1_size));
  set_initial_gen1_size(MIN2(_initial_gen1_size, _max_gen1_size));

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum gen1 " SIZE_FORMAT "  Initial gen1 "
      SIZE_FORMAT "  Maximum gen1 " SIZE_FORMAT,
      min_gen1_size(), initial_gen1_size(), max_gen1_size());
  }
}

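// Slow-path allocation for the generational heap.  A lock-free allocation in
// gen0 is attempted first; failing that, the code below takes the Heap_lock
// and retries (only large requests, or requests made while memory is tight,
// are also offered to older generations), expands the heap while the GC
// locker is active, and otherwise schedules a VM_GenCollectForAllocation
// safepoint operation before looping to try again.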
HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
                                        bool is_tlab,
                                        bool* gc_overhead_limit_was_exceeded) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();

  debug_only(gch->check_for_valid_allocation_state());
  assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");

  // In general gc_overhead_limit_was_exceeded should be false so
  // set it to false here and reset it to true only if the gc time
  // limit is being exceeded, as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = NULL;

  // Loop until the allocation is satisfied,
  // or unsatisfied after GC.
  for (int try_count = 1; /* return or throw */; try_count += 1) {
    HandleMark hm; // discard any handles allocated in each iteration

    // First allocation attempt is lock-free.
    Generation *gen0 = gch->get_gen(0);
    assert(gen0->supports_inline_contig_alloc(),
      "Otherwise, must do alloc within heap lock");
    if (gen0->should_allocate(size, is_tlab)) {
      result = gen0->par_allocate(size, is_tlab);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    unsigned int gc_count_before;  // read inside the Heap_lock locked region
    {
      MutexLocker ml(Heap_lock);
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr("TwoGenerationCollectorPolicy::mem_allocate_work:"
                      " attempting locked slow path allocation");
      }
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = ! should_try_older_generation_allocation(size);

      result = gch->attempt_allocation(size, is_tlab, first_only);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return NULL;  // Caller will retry allocating individual object
        }
        if (!gch->is_maximal_no_gc()) {
          // Try and expand heap to satisfy request
          result = expand_heap_and_allocate(size, is_tlab);
          // result could be null if we are out of space
          if (result != NULL) {
            return result;
          }
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = Universe::heap()->total_collections();
    }

    VM_GenCollectForAllocation op(size,
                                  is_tlab,
                                  gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
         assert(result == NULL, "must be NULL if gc_locked() is true");
         continue;  // retry and/or stall as necessary
      }

      // Allocation has failed and a collection
      // has been done.  If the gc time limit was exceeded
      // this time, return NULL so that an out-of-memory error
      // will be thrown.  Clear gc_overhead_limit_exceeded
      // so that the exceeded state does not persist.

      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      const bool softrefs_clear = all_soft_refs_clear();
      assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
      if (limit_exceeded && softrefs_clear) {
        *gc_overhead_limit_was_exceeded = true;
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (op.result() != NULL) {
          CollectedHeap::fill_with_object(op.result(), size);
        }
        return NULL;
      }
      assert(result == NULL || gch->is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
          warning("TwoGenerationCollectorPolicy::mem_allocate_work retries %d times \n\t"
                  " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}

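// Try to satisfy the allocation by expanding a generation (no collection),
// starting with the oldest generation and working down to the youngest.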
HeapWord* GenCollectorPolicy::expand_heap_and_allocate(size_t size,
                                                       bool   is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  HeapWord* result = NULL;
  for (int i = number_of_generations() - 1; i >= 0 && result == NULL; i--) {
    Generation *gen = gch->get_gen(i);
    if (gen->should_allocate(size, is_tlab)) {
      result = gen->expand_and_allocate(size, is_tlab);
    }
  }
  assert(result == NULL || gch->is_in_reserved(result), "result not in heap");
  return result;
}

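// Last-ditch allocation path, called after a failed allocation attempt.  The
// escalation order below is: expand only (when the GC locker is active), then
// an incremental collection if it is expected to succeed, otherwise a full
// collection; retry the allocation; expand; and finally a full collection
// that also clears soft references and fully compacts the heap.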
HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
                                                        bool   is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  GCCauseSetter x(gch, GCCause::_allocation_failure);
  HeapWord* result = NULL;

  assert(size != 0, "Precondition violated");
  if (GC_locker::is_active_and_needs_gc()) {
    // GC locker is active; instead of a collection we will attempt
    // to expand the heap, if there's room for expansion.
    if (!gch->is_maximal_no_gc()) {
      result = expand_heap_and_allocate(size, is_tlab);
    }
    return result;   // could be null if we are out of space
  } else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) {
    // Do an incremental collection.
    gch->do_collection(false            /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  } else {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: Trying full because partial may fail :: ");
    }
    // Try a full collection; see delta for bug id 6266275
    // for the original code and why this has been simplified
    // with from-space allocation criteria modified and
    // such allocation moved out of the safepoint path.
    gch->do_collection(true             /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  result = gch->attempt_allocation(size, is_tlab, false /*first_only*/);

  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  // OK, collection failed, try expansion.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != NULL) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    IntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    gch->do_collection(true             /* full */,
                       true             /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  result = gch->attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  assert(!should_clear_all_soft_refs(),
    "Flag should have been handled and cleared prior to this point");

  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}

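// Satisfy a failed metadata (class metadata) allocation.  While the GC locker
// is active, only expansion of the loader's metaspace is attempted (or the
// thread stalls until the critical section clears); otherwise a
// VM_CollectForMetadataAllocation safepoint operation is scheduled and the
// loop repeats until a GC has been done.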
MetaWord* CollectorPolicy::satisfy_failed_metadata_allocation(
                                                 ClassLoaderData* loader_data,
                                                 size_t word_size,
                                                 Metaspace::MetadataType mdtype) {
  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");

  do {
    MetaWord* result = NULL;
    if (GC_locker::is_active_and_needs_gc()) {
      // If the GC_locker is active, just expand and allocate.
      // If that does not succeed, wait if this thread is not
      // in a critical section itself.
      result =
        loader_data->metaspace_non_null()->expand_and_allocate(word_size,
                                                               mdtype);
      if (result != NULL) {
        return result;
      }
      JavaThread* jthr = JavaThread::current();
      if (!jthr->in_critical()) {
        // Wait for JNI critical section to be exited
        GC_locker::stall_until_clear();
        // The GC invoked by the last thread leaving the critical
        // section will be a young collection and a full collection
        // is (currently) needed for unloading classes so continue
        // to the next iteration to get a full GC.
        continue;
      } else {
        if (CheckJNICalls) {
          fatal("Possible deadlock due to allocating while"
                " in jni critical section");
        }
        return NULL;
      }
    }

    {  // Need lock to get self-consistent gc counts
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();
    }

    // Generate a VM operation
    VM_CollectForMetadataAllocation op(loader_data,
                                       word_size,
                                       mdtype,
                                       gc_count,
                                       full_gc_count,
                                       GCCause::_metadata_GC_threshold);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      return op.result();
    }
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("satisfy_failed_metadata_allocation() retries %d times \n\t"
              " size=" SIZE_FORMAT, loop_count, word_size);
    }
  } while (true);  // Until a GC is done
}

// Return true if any of the following is true:
// . the allocation won't fit into the current young gen heap
// . gc locker is occupied (jni critical section)
// . heap memory is tight -- the most recent previous collection
//   was a full collection because a partial collection (would
//   have) failed and is likely to fail again
bool GenCollectorPolicy::should_try_older_generation_allocation(
        size_t word_size) const {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t gen0_capacity = gch->get_gen(0)->capacity_before_gc();
  return    (word_size > heap_word_size(gen0_capacity))
         || GC_locker::is_active_and_needs_gc()
         || gch->incremental_collection_failed();
}


//
// MarkSweepPolicy methods
//

MarkSweepPolicy::MarkSweepPolicy() {
  initialize_all();
}

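// Create the generation specs for the serial mark-sweep-compact policy:
// gen 0 is the young generation (DefNew, or ParNew when -XX:+UseParNewGC is
// enabled with parallel GC threads), gen 1 is the mark-sweep-compacted
// tenured generation.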
void MarkSweepPolicy::initialize_generations() {
  _generations = new GenerationSpecPtr[number_of_generations()];
  if (_generations == NULL)
    vm_exit_during_initialization("Unable to allocate gen spec");

  if (UseParNewGC && ParallelGCThreads > 0) {
    _generations[0] = new GenerationSpec(Generation::ParNew, _initial_gen0_size, _max_gen0_size);
  } else {
    _generations[0] = new GenerationSpec(Generation::DefNew, _initial_gen0_size, _max_gen0_size);
  }
  _generations[1] = new GenerationSpec(Generation::MarkSweepCompact, _initial_gen1_size, _max_gen1_size);

  if (_generations[0] == NULL || _generations[1] == NULL)
    vm_exit_during_initialization("Unable to allocate gen spec");
}

void MarkSweepPolicy::initialize_gc_policy_counters() {
  // initialize the policy counters - 2 collectors, 3 generations
  if (UseParNewGC && ParallelGCThreads > 0) {
    _gc_policy_counters = new GCPolicyCounters("ParNew:MSC", 2, 3);
  }
  else {
    _gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 3);
  }
}