1 /*
   2  * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "aot/aotLoader.hpp"
  27 #include "classfile/classLoaderDataGraph.hpp"
  28 #include "classfile/symbolTable.hpp"
  29 #include "classfile/stringTable.hpp"
  30 #include "classfile/systemDictionary.hpp"
  31 #include "classfile/vmSymbols.hpp"
  32 #include "code/codeCache.hpp"
  33 #include "code/icBuffer.hpp"
  34 #include "gc/serial/defNewGeneration.hpp"
  35 #include "gc/shared/adaptiveSizePolicy.hpp"
  36 #include "gc/shared/cardTableBarrierSet.hpp"
  37 #include "gc/shared/cardTableRS.hpp"
  38 #include "gc/shared/collectedHeap.inline.hpp"
  39 #include "gc/shared/collectorCounters.hpp"
  40 #include "gc/shared/gcId.hpp"
  41 #include "gc/shared/gcLocker.hpp"
  42 #include "gc/shared/gcPolicyCounters.hpp"
  43 #include "gc/shared/gcTrace.hpp"
  44 #include "gc/shared/gcTraceTime.inline.hpp"
  45 #include "gc/shared/genArguments.hpp"
  46 #include "gc/shared/gcVMOperations.hpp"
  47 #include "gc/shared/genCollectedHeap.hpp"
  48 #include "gc/shared/genOopClosures.inline.hpp"
  49 #include "gc/shared/generationSpec.hpp"
  50 #include "gc/shared/oopStorageParState.inline.hpp"
  51 #include "gc/shared/scavengableNMethods.hpp"
  52 #include "gc/shared/space.hpp"
  53 #include "gc/shared/strongRootsScope.hpp"
  54 #include "gc/shared/weakProcessor.hpp"
  55 #include "gc/shared/workgroup.hpp"
  56 #include "memory/filemap.hpp"
  57 #include "memory/metaspaceCounters.hpp"
  58 #include "memory/resourceArea.hpp"
  59 #include "memory/universe.hpp"
  60 #include "oops/oop.inline.hpp"
  61 #include "runtime/biasedLocking.hpp"
  62 #include "runtime/flags/flagSetting.hpp"
  63 #include "runtime/handles.hpp"
  64 #include "runtime/handles.inline.hpp"
  65 #include "runtime/java.hpp"
  66 #include "runtime/vmThread.hpp"
  67 #include "services/management.hpp"
  68 #include "services/memoryService.hpp"
  69 #include "utilities/debug.hpp"
  70 #include "utilities/formatBuffer.hpp"
  71 #include "utilities/macros.hpp"
  72 #include "utilities/stack.inline.hpp"
  73 #include "utilities/vmError.hpp"
  74 #if INCLUDE_JVMCI
  75 #include "jvmci/jvmci.hpp"
  76 #endif
  77 
  78 GenCollectedHeap::GenCollectedHeap(Generation::Name young,
  79                                    Generation::Name old,
  80                                    const char* policy_counters_name) :
  81   CollectedHeap(),
  82   _young_gen_spec(new GenerationSpec(young,
  83                                      NewSize,
  84                                      MaxNewSize,
  85                                      GenAlignment)),
  86   _old_gen_spec(new GenerationSpec(old,
  87                                    OldSize,
  88                                    MaxOldSize,
  89                                    GenAlignment)),
  90   _rem_set(NULL),
  91   _soft_ref_gen_policy(),
  92   _gc_policy_counters(new GCPolicyCounters(policy_counters_name, 2, 2)),
  93   _full_collections_completed(0),
  94   _process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)) {
  95 }
  96 
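// Reserve and set up the heap: a single contiguous reservation holds the young
// generation at its low end and the old generation directly above it, with a
// card-table-based remembered set and barrier set covering the whole range.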
  97 jint GenCollectedHeap::initialize() {
  98   // While there are no constraints in the GC code that HeapWordSize
  99   // be any particular value, there are multiple other areas in the
 100   // system which believe this to be true (e.g. oop->object_size in some
 101   // cases incorrectly returns the size in wordSize units rather than
 102   // HeapWordSize).
 103   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
 104 
 105   // Allocate space for the heap.
 106 
 107   char* heap_address;
 108   ReservedSpace heap_rs;
 109 
 110   heap_address = allocate(HeapAlignment, &heap_rs);
 111 
 112   if (!heap_rs.is_reserved()) {
 113     vm_shutdown_during_initialization(
 114       "Could not reserve enough space for object heap");
 115     return JNI_ENOMEM;
 116   }
 117 
 118   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
 119 
 120   _rem_set = create_rem_set(reserved_region());
 121   _rem_set->initialize();
 122   CardTableBarrierSet *bs = new CardTableBarrierSet(_rem_set);
 123   bs->initialize();
 124   BarrierSet::set_barrier_set(bs);
 125 
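  // Carve the reservation into the two generations: the young generation gets
  // the first part and the old generation the remainder.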
 126   ReservedSpace young_rs = heap_rs.first_part(_young_gen_spec->max_size(), false, false);
 127   _young_gen = _young_gen_spec->init(young_rs, rem_set());
 128   heap_rs = heap_rs.last_part(_young_gen_spec->max_size());
 129 
 130   ReservedSpace old_rs = heap_rs.first_part(_old_gen_spec->max_size(), false, false);
 131   _old_gen = _old_gen_spec->init(old_rs, rem_set());
 132   clear_incremental_collection_failed();
 133 
 134   return JNI_OK;
 135 }
 136 
 137 CardTableRS* GenCollectedHeap::create_rem_set(const MemRegion& reserved_region) {
 138   return new CardTableRS(reserved_region, false /* scan_concurrently */);
 139 }
 140 
 141 void GenCollectedHeap::initialize_size_policy(size_t init_eden_size,
 142                                               size_t init_promo_size,
 143                                               size_t init_survivor_size) {
 144   const double max_gc_pause_sec = ((double) MaxGCPauseMillis) / 1000.0;
 145   _size_policy = new AdaptiveSizePolicy(init_eden_size,
 146                                         init_promo_size,
 147                                         init_survivor_size,
 148                                         max_gc_pause_sec,
 149                                         GCTimeRatio);
 150 }
 151 
 152 char* GenCollectedHeap::allocate(size_t alignment,
 153                                  ReservedSpace* heap_rs){
 154   // Now figure out the total size.
 155   const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
 156   assert(alignment % pageSize == 0, "Must be");
 157 
 158   // Check for overflow.
 159   size_t total_reserved = _young_gen_spec->max_size() + _old_gen_spec->max_size();
 160   if (total_reserved < _young_gen_spec->max_size()) {
 161     vm_exit_during_initialization("The size of the object heap + VM data exceeds "
 162                                   "the maximum representable size");
 163   }
 164   assert(total_reserved % alignment == 0,
 165          "Gen size; total_reserved=" SIZE_FORMAT ", alignment="
 166          SIZE_FORMAT, total_reserved, alignment);
 167 
 168   *heap_rs = Universe::reserve_heap(total_reserved, alignment);
 169 
 170   os::trace_page_sizes("Heap",
 171                        MinHeapSize,
 172                        total_reserved,
 173                        alignment,
 174                        heap_rs->base(),
 175                        heap_rs->size());
 176 
 177   return heap_rs->base();
 178 }
 179 
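// Closure handed to ScavengableNMethods (see post_initialize below) to decide
// whether an oop can be moved by a scavenge: only young-generation objects can.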
 180 class GenIsScavengable : public BoolObjectClosure {
 181 public:
 182   bool do_object_b(oop obj) {
 183     return GenCollectedHeap::heap()->is_in_young(obj);
 184   }
 185 };
 186 
 187 static GenIsScavengable _is_scavengable;
 188 
 189 void GenCollectedHeap::post_initialize() {
 190   CollectedHeap::post_initialize();
 191   ref_processing_init();
 192 
 193   DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;
 194 
 195   initialize_size_policy(def_new_gen->eden()->capacity(),
 196                          _old_gen->capacity(),
 197                          def_new_gen->from()->capacity());
 198 
 199   MarkSweep::initialize();
 200 
 201   ScavengableNMethods::initialize(&_is_scavengable);
 202 }
 203 
 204 void GenCollectedHeap::ref_processing_init() {
 205   _young_gen->ref_processor_init();
 206   _old_gen->ref_processor_init();
 207 }
 208 
 209 GenerationSpec* GenCollectedHeap::young_gen_spec() const {
 210   return _young_gen_spec;
 211 }
 212 
 213 GenerationSpec* GenCollectedHeap::old_gen_spec() const {
 214   return _old_gen_spec;
 215 }
 216 
 217 size_t GenCollectedHeap::capacity() const {
 218   return _young_gen->capacity() + _old_gen->capacity();
 219 }
 220 
 221 size_t GenCollectedHeap::used() const {
 222   return _young_gen->used() + _old_gen->used();
 223 }
 224 
 225 void GenCollectedHeap::save_used_regions() {
 226   _old_gen->save_used_region();
 227   _young_gen->save_used_region();
 228 }
 229 
 230 size_t GenCollectedHeap::max_capacity() const {
 231   return _young_gen->max_capacity() + _old_gen->max_capacity();
 232 }
 233 
 234 // Update the _full_collections_completed counter
 235 // at the end of a stop-world full GC.
 236 unsigned int GenCollectedHeap::update_full_collections_completed() {
 237   MonitorLocker ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
 238   assert(_full_collections_completed <= _total_full_collections,
 239          "Can't complete more collections than were started");
 240   _full_collections_completed = _total_full_collections;
 241   ml.notify_all();
 242   return _full_collections_completed;
 243 }
 244 
 245 // Update the _full_collections_completed counter, as appropriate,
 246 // at the end of a concurrent GC cycle. Note the conditional update
 247 // below to allow this method to be called by a concurrent collector
 248 // without synchronizing in any manner with the VM thread (which
 249 // may already have initiated a STW full collection "concurrently").
 250 unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) {
 251   MonitorLocker ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
 252   assert((_full_collections_completed <= _total_full_collections) &&
 253          (count <= _total_full_collections),
 254          "Can't complete more collections than were started");
 255   if (count > _full_collections_completed) {
 256     _full_collections_completed = count;
 257     ml.notify_all();
 258   }
 259   return _full_collections_completed;
 260 }
 261 
 262 // Return true if any of the following is true:
 263 // . the allocation won't fit into the current young gen heap
 264 // . gc locker is occupied (jni critical section)
 265 // . heap memory is tight -- the most recent previous collection
 266 //   was a full collection because a partial collection (would
 267 //   have) failed and is likely to fail again
 268 bool GenCollectedHeap::should_try_older_generation_allocation(size_t word_size) const {
 269   size_t young_capacity = _young_gen->capacity_before_gc();
 270   return    (word_size > heap_word_size(young_capacity))
 271          || GCLocker::is_active_and_needs_gc()
 272          || incremental_collection_failed();
 273 }
 274 
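// Allocate without collecting: try to expand the old generation first and,
// failing that, the young generation.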
HeapWord* GenCollectedHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
 276   HeapWord* result = NULL;
 277   if (_old_gen->should_allocate(size, is_tlab)) {
 278     result = _old_gen->expand_and_allocate(size, is_tlab);
 279   }
 280   if (result == NULL) {
 281     if (_young_gen->should_allocate(size, is_tlab)) {
 282       result = _young_gen->expand_and_allocate(size, is_tlab);
 283     }
 284   }
 285   assert(result == NULL || is_in_reserved(result), "result not in heap");
 286   return result;
 287 }
 288 
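// Allocation slow path. Loop attempting (1) a lock-free allocation in the young
// generation, (2) a locked allocation that may also consult the old generation,
// and (3) a collection via the VM thread, stalling on the GC locker as needed,
// until the request is satisfied or determined to be unsatisfiable.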
 289 HeapWord* GenCollectedHeap::mem_allocate_work(size_t size,
 290                                               bool is_tlab,
 291                                               bool* gc_overhead_limit_was_exceeded) {
  // In general gc_overhead_limit_was_exceeded should be false, so
  // set it to false here and reset it to true only if the gc time
  // limit is exceeded as checked below.
 295   *gc_overhead_limit_was_exceeded = false;
 296 
 297   HeapWord* result = NULL;
 298 
 299   // Loop until the allocation is satisfied, or unsatisfied after GC.
 300   for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
 301     HandleMark hm; // Discard any handles allocated in each iteration.
 302 
 303     // First allocation attempt is lock-free.
 304     Generation *young = _young_gen;
 305     assert(young->supports_inline_contig_alloc(),
 306       "Otherwise, must do alloc within heap lock");
 307     if (young->should_allocate(size, is_tlab)) {
 308       result = young->par_allocate(size, is_tlab);
 309       if (result != NULL) {
 310         assert(is_in_reserved(result), "result not in heap");
 311         return result;
 312       }
 313     }
 314     uint gc_count_before;  // Read inside the Heap_lock locked region.
 315     {
 316       MutexLocker ml(Heap_lock);
 317       log_trace(gc, alloc)("GenCollectedHeap::mem_allocate_work: attempting locked slow path allocation");
 318       // Note that only large objects get a shot at being
 319       // allocated in later generations.
 320       bool first_only = !should_try_older_generation_allocation(size);
 321 
 322       result = attempt_allocation(size, is_tlab, first_only);
 323       if (result != NULL) {
 324         assert(is_in_reserved(result), "result not in heap");
 325         return result;
 326       }
 327 
 328       if (GCLocker::is_active_and_needs_gc()) {
 329         if (is_tlab) {
 330           return NULL;  // Caller will retry allocating individual object.
 331         }
 332         if (!is_maximal_no_gc()) {
 333           // Try and expand heap to satisfy request.
 334           result = expand_heap_and_allocate(size, is_tlab);
 335           // Result could be null if we are out of space.
 336           if (result != NULL) {
 337             return result;
 338           }
 339         }
 340 
 341         if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
 342           return NULL; // We didn't get to do a GC and we didn't get any memory.
 343         }
 344 
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
 351         JavaThread* jthr = JavaThread::current();
 352         if (!jthr->in_critical()) {
 353           MutexUnlocker mul(Heap_lock);
 354           // Wait for JNI critical section to be exited
 355           GCLocker::stall_until_clear();
 356           gclocker_stalled_count += 1;
 357           continue;
 358         } else {
 359           if (CheckJNICalls) {
 360             fatal("Possible deadlock due to allocating while"
 361                   " in jni critical section");
 362           }
 363           return NULL;
 364         }
 365       }
 366 
 367       // Read the gc count while the heap lock is held.
 368       gc_count_before = total_collections();
 369     }
 370 
 371     VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
 372     VMThread::execute(&op);
 373     if (op.prologue_succeeded()) {
 374       result = op.result();
 375       if (op.gc_locked()) {
 376          assert(result == NULL, "must be NULL if gc_locked() is true");
 377          continue;  // Retry and/or stall as necessary.
 378       }
 379 
      // Allocation has failed and a collection
      // has been done.  If the gc time limit was exceeded
      // this time, return NULL so that an out-of-memory error
      // will be thrown.  Clear gc_overhead_limit_exceeded
      // so that the overhead limit state does not persist.
 385 
 386       const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
 387       const bool softrefs_clear = soft_ref_policy()->all_soft_refs_clear();
 388 
 389       if (limit_exceeded && softrefs_clear) {
 390         *gc_overhead_limit_was_exceeded = true;
 391         size_policy()->set_gc_overhead_limit_exceeded(false);
 392         if (op.result() != NULL) {
 393           CollectedHeap::fill_with_object(op.result(), size);
 394         }
 395         return NULL;
 396       }
 397       assert(result == NULL || is_in_reserved(result),
 398              "result not in heap");
 399       return result;
 400     }
 401 
 402     // Give a warning if we seem to be looping forever.
 403     if ((QueuedAllocationWarningCount > 0) &&
 404         (try_count % QueuedAllocationWarningCount == 0)) {
 405           log_warning(gc, ergo)("GenCollectedHeap::mem_allocate_work retries %d times,"
 406                                 " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
 407     }
 408   }
 409 }
 410 
 411 #ifndef PRODUCT
// Override of memory state checking method in CollectedHeap:
// Some collectors (CMS for example) can't have badHeapWordVal written
// in the first two words of an object. (For instance, in the case of
// CMS these words hold state used to synchronize between certain
// (concurrent) GC steps and direct allocating mutators.)
// The skip_header_HeapWords() method below allows us to skip
// over the requisite number of HeapWords. Note that (for
// generational collectors) this means that those many words are
// skipped in each object, irrespective of the generation in which
// that object lives. The resultant loss of precision seems to be
// harmless and the pain of avoiding that imprecision appears somewhat
// higher than we are prepared to pay for such rudimentary debugging
// support.
 425 void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
 426                                                          size_t size) {
 427   if (CheckMemoryInitialization && ZapUnusedHeapArea) {
 428     // We are asked to check a size in HeapWords,
 429     // but the memory is mangled in juint words.
 430     juint* start = (juint*) (addr + skip_header_HeapWords());
 431     juint* end   = (juint*) (addr + size);
 432     for (juint* slot = start; slot < end; slot += 1) {
      assert(*slot == badHeapWordVal,
             "Found non-badHeapWordVal in pre-allocation check");
 435     }
 436   }
 437 }
 438 #endif
 439 
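// Attempt allocation in the young generation and, unless "first_only" is set,
// fall back to the old generation. Returns NULL if neither generation can
// satisfy the request without collecting.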
 440 HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
 441                                                bool is_tlab,
 442                                                bool first_only) {
 443   HeapWord* res = NULL;
 444 
 445   if (_young_gen->should_allocate(size, is_tlab)) {
 446     res = _young_gen->allocate(size, is_tlab);
 447     if (res != NULL || first_only) {
 448       return res;
 449     }
 450   }
 451 
 452   if (_old_gen->should_allocate(size, is_tlab)) {
 453     res = _old_gen->allocate(size, is_tlab);
 454   }
 455 
 456   return res;
 457 }
 458 
 459 HeapWord* GenCollectedHeap::mem_allocate(size_t size,
 460                                          bool* gc_overhead_limit_was_exceeded) {
 461   return mem_allocate_work(size,
 462                            false /* is_tlab */,
 463                            gc_overhead_limit_was_exceeded);
 464 }
 465 
 466 bool GenCollectedHeap::must_clear_all_soft_refs() {
 467   return _gc_cause == GCCause::_metadata_GC_clear_soft_refs ||
 468          _gc_cause == GCCause::_wb_full_gc;
 469 }
 470 
 471 void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
 472                                           bool is_tlab, bool run_verification, bool clear_soft_refs,
 473                                           bool restore_marks_for_biased_locking) {
 474   FormatBuffer<> title("Collect gen: %s", gen->short_name());
 475   GCTraceTime(Trace, gc, phases) t1(title);
 476   TraceCollectorStats tcs(gen->counters());
 477   TraceMemoryManagerStats tmms(gen->gc_manager(), gc_cause());
 478 
 479   gen->stat_record()->invocations++;
 480   gen->stat_record()->accumulated_time.start();
 481 
 482   // Must be done anew before each collection because
 483   // a previous collection will do mangling and will
 484   // change top of some spaces.
 485   record_gen_tops_before_GC();
 486 
 487   log_trace(gc)("%s invoke=%d size=" SIZE_FORMAT, heap()->is_young_gen(gen) ? "Young" : "Old", gen->stat_record()->invocations, size * HeapWordSize);
 488 
 489   if (run_verification && VerifyBeforeGC) {
 490     HandleMark hm;  // Discard invalid handles created during verification
 491     Universe::verify("Before GC");
 492   }
 493   COMPILER2_PRESENT(DerivedPointerTable::clear());
 494 
 495   if (restore_marks_for_biased_locking) {
 496     // We perform this mark word preservation work lazily
 497     // because it's only at this point that we know whether we
 498     // absolutely have to do it; we want to avoid doing it for
 499     // scavenge-only collections where it's unnecessary
 500     BiasedLocking::preserve_marks();
 501   }
 502 
 503   // Do collection work
 504   {
 505     // Note on ref discovery: For what appear to be historical reasons,
    // GCH enables and disables (by enqueueing) refs discovery.
 507     // In the future this should be moved into the generation's
 508     // collect method so that ref discovery and enqueueing concerns
 509     // are local to a generation. The collect method could return
 510     // an appropriate indication in the case that notification on
 511     // the ref lock was needed. This will make the treatment of
 512     // weak refs more uniform (and indeed remove such concerns
 513     // from GCH). XXX
 514 
 515     HandleMark hm;  // Discard invalid handles created during gc
 516     save_marks();   // save marks for all gens
 517     // We want to discover references, but not process them yet.
 518     // This mode is disabled in process_discovered_references if the
 519     // generation does some collection work, or in
 520     // enqueue_discovered_references if the generation returns
 521     // without doing any work.
 522     ReferenceProcessor* rp = gen->ref_processor();
 523     // If the discovery of ("weak") refs in this generation is
 524     // atomic wrt other collectors in this configuration, we
 525     // are guaranteed to have empty discovered ref lists.
 526     if (rp->discovery_is_atomic()) {
 527       rp->enable_discovery();
 528       rp->setup_policy(clear_soft_refs);
 529     } else {
 530       // collect() below will enable discovery as appropriate
 531     }
 532     gen->collect(full, clear_soft_refs, size, is_tlab);
 533     if (!rp->enqueuing_is_done()) {
 534       rp->disable_discovery();
 535     } else {
 536       rp->set_enqueuing_is_done(false);
 537     }
 538     rp->verify_no_references_recorded();
 539   }
 540 
 541   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 542 
 543   gen->stat_record()->accumulated_time.stop();
 544 
 545   update_gc_stats(gen, full);
 546 
 547   if (run_verification && VerifyAfterGC) {
 548     HandleMark hm;  // Discard invalid handles created during verification
 549     Universe::verify("After GC");
 550   }
 551 }
 552 
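// Perform a collection at a safepoint: run a young collection if the young
// generation wants one (and a full collection is not going to collect it
// anyway), then consult should_do_full_collection() and, if needed, collect
// the old generation as well.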
 553 void GenCollectedHeap::do_collection(bool           full,
 554                                      bool           clear_all_soft_refs,
 555                                      size_t         size,
 556                                      bool           is_tlab,
 557                                      GenerationType max_generation) {
 558   ResourceMark rm;
 559   DEBUG_ONLY(Thread* my_thread = Thread::current();)
 560 
 561   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
 562   assert(my_thread->is_VM_thread() ||
 563          my_thread->is_ConcurrentGC_thread(),
 564          "incorrect thread type capability");
 565   assert(Heap_lock->is_locked(),
 566          "the requesting thread should have the Heap_lock");
 567   guarantee(!is_gc_active(), "collection is not reentrant");
 568 
 569   if (GCLocker::check_active_before_gc()) {
 570     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
 571   }
 572 
 573   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
 574                           soft_ref_policy()->should_clear_all_soft_refs();
 575 
 576   ClearedAllSoftRefs casr(do_clear_all_soft_refs, soft_ref_policy());
 577 
 578   FlagSetting fl(_is_gc_active, true);
 579 
 580   bool complete = full && (max_generation == OldGen);
 581   bool old_collects_young = complete && !ScavengeBeforeFullGC;
 582   bool do_young_collection = !old_collects_young && _young_gen->should_collect(full, size, is_tlab);
 583 
 584   size_t young_prev_used = _young_gen->used();
 585   size_t old_prev_used = _old_gen->used();
 586   const metaspace::MetaspaceSizesSnapshot prev_meta_sizes;
 587 
 588   bool run_verification = total_collections() >= VerifyGCStartAt;
 589   bool prepared_for_verification = false;
 590   bool do_full_collection = false;
 591 
 592   if (do_young_collection) {
 593     GCIdMark gc_id_mark;
 594     GCTraceCPUTime tcpu;
 595     GCTraceTime(Info, gc) t("Pause Young", NULL, gc_cause(), true);
 596 
 597     print_heap_before_gc();
 598 
 599     if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
 600       prepare_for_verify();
 601       prepared_for_verification = true;
 602     }
 603 
 604     gc_prologue(complete);
 605     increment_total_collections(complete);
 606 
 607     collect_generation(_young_gen,
 608                        full,
 609                        size,
 610                        is_tlab,
 611                        run_verification && VerifyGCLevel <= 0,
 612                        do_clear_all_soft_refs,
 613                        false);
 614 
 615     if (size > 0 && (!is_tlab || _young_gen->supports_tlab_allocation()) &&
 616         size * HeapWordSize <= _young_gen->unsafe_max_alloc_nogc()) {
 617       // Allocation request was met by young GC.
 618       size = 0;
 619     }
 620 
    // Ask if the young collection is enough. If so, do the final steps for the
    // young collection, and fall through to the end.
 623     do_full_collection = should_do_full_collection(size, full, is_tlab, max_generation);
 624     if (!do_full_collection) {
 625       // Adjust generation sizes.
 626       _young_gen->compute_new_size();
 627 
 628       print_heap_change(young_prev_used, old_prev_used);
 629       MetaspaceUtils::print_metaspace_change(prev_meta_sizes);
 630 
 631       // Track memory usage and detect low memory after GC finishes
 632       MemoryService::track_memory_usage();
 633 
 634       gc_epilogue(complete);
 635     }
 636 
 637     print_heap_after_gc();
 638 
 639   } else {
    // No young collection was done; ask whether we need to perform a Full collection.
 641     do_full_collection = should_do_full_collection(size, full, is_tlab, max_generation);
 642   }
 643 
 644   if (do_full_collection) {
 645     GCIdMark gc_id_mark;
 646     GCTraceCPUTime tcpu;
 647     GCTraceTime(Info, gc) t("Pause Full", NULL, gc_cause(), true);
 648 
 649     print_heap_before_gc();
 650 
 651     if (!prepared_for_verification && run_verification &&
 652         VerifyGCLevel <= 1 && VerifyBeforeGC) {
 653       prepare_for_verify();
 654     }
 655 
 656     if (!do_young_collection) {
 657       gc_prologue(complete);
 658       increment_total_collections(complete);
 659     }
 660 
    // Accounting quirk: total full collections would be incremented when "complete"
    // is set, by calling increment_total_collections above. However, we also need to
    // account for Full collections that had "complete" unset.
 664     if (!complete) {
 665       increment_total_full_collections();
 666     }
 667 
 668     collect_generation(_old_gen,
 669                        full,
 670                        size,
 671                        is_tlab,
 672                        run_verification && VerifyGCLevel <= 1,
 673                        do_clear_all_soft_refs,
 674                        true);
 675 
 676     // Adjust generation sizes.
 677     _old_gen->compute_new_size();
 678     _young_gen->compute_new_size();
 679 
 680     // Delete metaspaces for unloaded class loaders and clean up loader_data graph
 681     ClassLoaderDataGraph::purge();
 682     MetaspaceUtils::verify_metrics();
 683     // Resize the metaspace capacity after full collections
 684     MetaspaceGC::compute_new_size();
 685     update_full_collections_completed();
 686 
 687     print_heap_change(young_prev_used, old_prev_used);
 688     MetaspaceUtils::print_metaspace_change(prev_meta_sizes);
 689 
 690     // Track memory usage and detect low memory after GC finishes
 691     MemoryService::track_memory_usage();
 692 
    // Need to tell the epilogue code we are done with Full GC, regardless of
    // what the initial value of the "complete" flag was.
 695     gc_epilogue(true);
 696 
 697     BiasedLocking::restore_marks();
 698 
 699     print_heap_after_gc();
 700   }
 701 
 702 #ifdef TRACESPINNING
 703   ParallelTaskTerminator::print_termination_counts();
 704 #endif
 705 }
 706 
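// A full collection is done only when the request may touch the old generation
// and the old generation itself thinks it should be collected.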
 707 bool GenCollectedHeap::should_do_full_collection(size_t size, bool full, bool is_tlab,
 708                                                  GenCollectedHeap::GenerationType max_gen) const {
 709   return max_gen == OldGen && _old_gen->should_collect(full, size, is_tlab);
 710 }
 711 
 712 void GenCollectedHeap::register_nmethod(nmethod* nm) {
 713   ScavengableNMethods::register_nmethod(nm);
 714 }
 715 
 716 void GenCollectedHeap::unregister_nmethod(nmethod* nm) {
 717   ScavengableNMethods::unregister_nmethod(nm);
 718 }
 719 
 720 void GenCollectedHeap::verify_nmethod(nmethod* nm) {
 721   ScavengableNMethods::verify_nmethod(nm);
 722 }
 723 
 724 void GenCollectedHeap::flush_nmethod(nmethod* nm) {
 725   // Do nothing.
 726 }
 727 
 728 void GenCollectedHeap::prune_scavengable_nmethods() {
 729   ScavengableNMethods::prune_nmethods();
 730 }
 731 
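// The allocation has failed in both generations. Escalate from an incremental
// collection, to a full collection, to a soft-reference-clearing fully
// compacting collection, retrying the allocation (and heap expansion) after
// each step.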
 732 HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
 733   GCCauseSetter x(this, GCCause::_allocation_failure);
 734   HeapWord* result = NULL;
 735 
 736   assert(size != 0, "Precondition violated");
 737   if (GCLocker::is_active_and_needs_gc()) {
 738     // GC locker is active; instead of a collection we will attempt
 739     // to expand the heap, if there's room for expansion.
 740     if (!is_maximal_no_gc()) {
 741       result = expand_heap_and_allocate(size, is_tlab);
 742     }
 743     return result;   // Could be null if we are out of space.
 744   } else if (!incremental_collection_will_fail(false /* don't consult_young */)) {
 745     // Do an incremental collection.
 746     do_collection(false,                     // full
 747                   false,                     // clear_all_soft_refs
 748                   size,                      // size
 749                   is_tlab,                   // is_tlab
 750                   GenCollectedHeap::OldGen); // max_generation
 751   } else {
 752     log_trace(gc)(" :: Trying full because partial may fail :: ");
 753     // Try a full collection; see delta for bug id 6266275
 754     // for the original code and why this has been simplified
 755     // with from-space allocation criteria modified and
 756     // such allocation moved out of the safepoint path.
 757     do_collection(true,                      // full
 758                   false,                     // clear_all_soft_refs
 759                   size,                      // size
 760                   is_tlab,                   // is_tlab
 761                   GenCollectedHeap::OldGen); // max_generation
 762   }
 763 
 764   result = attempt_allocation(size, is_tlab, false /*first_only*/);
 765 
 766   if (result != NULL) {
 767     assert(is_in_reserved(result), "result not in heap");
 768     return result;
 769   }
 770 
 771   // OK, collection failed, try expansion.
 772   result = expand_heap_and_allocate(size, is_tlab);
 773   if (result != NULL) {
 774     return result;
 775   }
 776 
 777   // If we reach this point, we're really out of memory. Try every trick
 778   // we can to reclaim memory. Force collection of soft references. Force
 779   // a complete compaction of the heap. Any additional methods for finding
 780   // free memory should be here, especially if they are expensive. If this
 781   // attempt fails, an OOM exception will be thrown.
 782   {
 783     UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted
 784 
 785     do_collection(true,                      // full
 786                   true,                      // clear_all_soft_refs
 787                   size,                      // size
 788                   is_tlab,                   // is_tlab
 789                   GenCollectedHeap::OldGen); // max_generation
 790   }
 791 
 792   result = attempt_allocation(size, is_tlab, false /* first_only */);
 793   if (result != NULL) {
 794     assert(is_in_reserved(result), "result not in heap");
 795     return result;
 796   }
 797 
 798   assert(!soft_ref_policy()->should_clear_all_soft_refs(),
 799     "Flag should have been handled and cleared prior to this point");
 800 
 801   // What else?  We might try synchronous finalization later.  If the total
 802   // space available is large enough for the allocation, then a more
 803   // complete compaction phase than we've tried so far might be
 804   // appropriate.
 805   return NULL;
 806 }
 807 
 808 #ifdef ASSERT
 809 class AssertNonScavengableClosure: public OopClosure {
 810 public:
  virtual void do_oop(oop* p) {
    assert(!GenCollectedHeap::heap()->is_in_partial_collection(*p),
           "Referent should not be scavengable.");
  }
 814   virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
 815 };
 816 static AssertNonScavengableClosure assert_is_non_scavengable_closure;
 817 #endif
 818 
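// Scan the strong roots: class loader data, thread stacks, Universe, JNI
// handles, the object synchronizer, management, JVMTI, AOT and SystemDictionary
// oops, plus (depending on "so") parts or all of the code cache. Each subtask
// is claimed at most once across the parallel GC workers.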
 819 void GenCollectedHeap::process_roots(StrongRootsScope* scope,
 820                                      ScanningOption so,
 821                                      OopClosure* strong_roots,
 822                                      CLDClosure* strong_cld_closure,
 823                                      CLDClosure* weak_cld_closure,
 824                                      CodeBlobToOopClosure* code_roots) {
 825   // General roots.
 826   assert(code_roots != NULL, "code root closure should always be set");
  // _n_termination for _process_strong_tasks should be set upstream,
  // in a method not running in a GC worker.  Otherwise a GC worker
  // could be trying to change the termination condition while the task
  // is executing in another GC worker.
 831 
 832   if (_process_strong_tasks->try_claim_task(GCH_PS_ClassLoaderDataGraph_oops_do)) {
 833     ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);
 834   }
 835 
 836   // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
 837   CodeBlobToOopClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;
 838 
 839   bool is_par = scope->n_threads() > 1;
 840   Threads::possibly_parallel_oops_do(is_par, strong_roots, roots_from_code_p);
 841 
 842   if (_process_strong_tasks->try_claim_task(GCH_PS_Universe_oops_do)) {
 843     Universe::oops_do(strong_roots);
 844   }
 845   // Global (strong) JNI handles
 846   if (_process_strong_tasks->try_claim_task(GCH_PS_JNIHandles_oops_do)) {
 847     JNIHandles::oops_do(strong_roots);
 848   }
 849 
 850   if (_process_strong_tasks->try_claim_task(GCH_PS_ObjectSynchronizer_oops_do)) {
 851     ObjectSynchronizer::oops_do(strong_roots);
 852   }
 853   if (_process_strong_tasks->try_claim_task(GCH_PS_Management_oops_do)) {
 854     Management::oops_do(strong_roots);
 855   }
 856   if (_process_strong_tasks->try_claim_task(GCH_PS_jvmti_oops_do)) {
 857     JvmtiExport::oops_do(strong_roots);
 858   }
 859 #if INCLUDE_AOT
 860   if (UseAOT && _process_strong_tasks->try_claim_task(GCH_PS_aot_oops_do)) {
 861     AOTLoader::oops_do(strong_roots);
 862   }
 863 #endif
 864   if (_process_strong_tasks->try_claim_task(GCH_PS_SystemDictionary_oops_do)) {
 865     SystemDictionary::oops_do(strong_roots);
 866   }
 867 
 868   if (_process_strong_tasks->try_claim_task(GCH_PS_CodeCache_oops_do)) {
 869     if (so & SO_ScavengeCodeCache) {
 870       assert(code_roots != NULL, "must supply closure for code cache");
 871 
 872       // We only visit parts of the CodeCache when scavenging.
 873       ScavengableNMethods::nmethods_do(code_roots);
 874     }
 875     if (so & SO_AllCodeCache) {
 876       assert(code_roots != NULL, "must supply closure for code cache");
 877 
 878       // CMSCollector uses this to do intermediate-strength collections.
 879       // We scan the entire code cache, since CodeCache::do_unloading is not called.
 880       CodeCache::blobs_do(code_roots);
 881     }
 882     // Verify that the code cache contents are not subject to
 883     // movement by a scavenging collection.
 884     DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
 885     DEBUG_ONLY(ScavengableNMethods::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
 886   }
 887 }
 888 
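// Root processing for a young collection: the strong roots above plus a
// remembered-set scan of the old generation for references into the young
// generation.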
 889 void GenCollectedHeap::young_process_roots(StrongRootsScope* scope,
 890                                            OopsInGenClosure* root_closure,
 891                                            OopsInGenClosure* old_gen_closure,
 892                                            CLDClosure* cld_closure) {
 893   MarkingCodeBlobClosure mark_code_closure(root_closure, CodeBlobToOopClosure::FixRelocations);
 894 
 895   process_roots(scope, SO_ScavengeCodeCache, root_closure,
 896                 cld_closure, cld_closure, &mark_code_closure);
 897 
 898   if (_process_strong_tasks->try_claim_task(GCH_PS_younger_gens)) {
 899     root_closure->reset_generation();
 900   }
 901 
 902   // When collection is parallel, all threads get to cooperate to do
 903   // old generation scanning.
 904   old_gen_closure->set_generation(_old_gen);
 905   rem_set()->younger_refs_iterate(_old_gen, old_gen_closure, scope->n_threads());
 906   old_gen_closure->reset_generation();
 907 
 908   _process_strong_tasks->all_tasks_completed(scope->n_threads());
 909 }
 910 
 911 void GenCollectedHeap::full_process_roots(StrongRootsScope* scope,
 912                                           bool is_adjust_phase,
 913                                           ScanningOption so,
 914                                           bool only_strong_roots,
 915                                           OopsInGenClosure* root_closure,
 916                                           CLDClosure* cld_closure) {
 917   MarkingCodeBlobClosure mark_code_closure(root_closure, is_adjust_phase);
 918   CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;
 919 
 920   process_roots(scope, so, root_closure, cld_closure, weak_cld_closure, &mark_code_closure);
 921   _process_strong_tasks->all_tasks_completed(scope->n_threads());
 922 }
 923 
 924 void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
 925   WeakProcessor::oops_do(root_closure);
 926   _young_gen->ref_processor()->weak_oops_do(root_closure);
 927   _old_gen->ref_processor()->weak_oops_do(root_closure);
 928 }
 929 
 930 bool GenCollectedHeap::no_allocs_since_save_marks() {
 931   return _young_gen->no_allocs_since_save_marks() &&
 932          _old_gen->no_allocs_since_save_marks();
 933 }
 934 
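// Support for inline (compiled-code) contiguous allocation: it is delegated to
// the young generation, whose allocation window is exposed via top_addr() and
// end_addr() below.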
 935 bool GenCollectedHeap::supports_inline_contig_alloc() const {
 936   return _young_gen->supports_inline_contig_alloc();
 937 }
 938 
 939 HeapWord* volatile* GenCollectedHeap::top_addr() const {
 940   return _young_gen->top_addr();
 941 }
 942 
 943 HeapWord** GenCollectedHeap::end_addr() const {
 944   return _young_gen->end_addr();
 945 }
 946 
 947 // public collection interfaces
 948 
 949 void GenCollectedHeap::collect(GCCause::Cause cause) {
 950   if ((cause == GCCause::_wb_young_gc) ||
 951       (cause == GCCause::_gc_locker)) {
 952     // Young collection for WhiteBox or GCLocker.
 953     collect(cause, YoungGen);
 954   } else {
 955 #ifdef ASSERT
    if (cause == GCCause::_scavenge_alot) {
      // Young collection only.
      collect(cause, YoungGen);
    } else {
      // Stop-the-world full collection.
      collect(cause, OldGen);
    }
 963 #else
 964     // Stop-the-world full collection.
 965     collect(cause, OldGen);
 966 #endif
 967   }
 968 }
 969 
 970 void GenCollectedHeap::collect(GCCause::Cause cause, GenerationType max_generation) {
 971   // The caller doesn't have the Heap_lock
 972   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
 973   MutexLocker ml(Heap_lock);
 974   collect_locked(cause, max_generation);
 975 }
 976 
 977 void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
 978   // The caller has the Heap_lock
 979   assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
 980   collect_locked(cause, OldGen);
 981 }
 982 
 983 // this is the private collection interface
 984 // The Heap_lock is expected to be held on entry.
 985 
 986 void GenCollectedHeap::collect_locked(GCCause::Cause cause, GenerationType max_generation) {
 987   // Read the GC count while holding the Heap_lock
 988   unsigned int gc_count_before      = total_collections();
 989   unsigned int full_gc_count_before = total_full_collections();
 990 
 991   if (GCLocker::should_discard(cause, gc_count_before)) {
 992     return;
 993   }
 994 
 995   {
 996     MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
 997     VM_GenCollectFull op(gc_count_before, full_gc_count_before,
 998                          cause, max_generation);
 999     VMThread::execute(&op);
1000   }
1001 }
1002 
1003 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1004    do_full_collection(clear_all_soft_refs, OldGen);
1005 }
1006 
1007 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
1008                                           GenerationType last_generation) {
1009   do_collection(true,                   // full
1010                 clear_all_soft_refs,    // clear_all_soft_refs
1011                 0,                      // size
1012                 false,                  // is_tlab
1013                 last_generation);       // last_generation
1014   // Hack XXX FIX ME !!!
1015   // A scavenge may not have been attempted, or may have
1016   // been attempted and failed, because the old gen was too full
1017   if (gc_cause() == GCCause::_gc_locker && incremental_collection_failed()) {
1018     log_debug(gc, jni)("GC locker: Trying a full collection because scavenge failed");
1019     // This time allow the old gen to be collected as well
1020     do_collection(true,                // full
1021                   clear_all_soft_refs, // clear_all_soft_refs
1022                   0,                   // size
1023                   false,               // is_tlab
1024                   OldGen);             // last_generation
1025   }
1026 }
1027 
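// Fast young-generation membership test: the young generation lies below the
// old generation in the contiguous reservation, so a single address comparison
// suffices.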
1028 bool GenCollectedHeap::is_in_young(oop p) {
1029   bool result = ((HeapWord*)p) < _old_gen->reserved().start();
1030   assert(result == _young_gen->is_in_reserved(p),
1031          "incorrect test - result=%d, p=" INTPTR_FORMAT, result, p2i((void*)p));
1032   return result;
1033 }
1034 
1035 // Returns "TRUE" iff "p" points into the committed areas of the heap.
1036 bool GenCollectedHeap::is_in(const void* p) const {
1037   return _young_gen->is_in(p) || _old_gen->is_in(p);
1038 }
1039 
1040 #ifdef ASSERT
1041 // Don't implement this by using is_in_young().  This method is used
1042 // in some cases to check that is_in_young() is correct.
1043 bool GenCollectedHeap::is_in_partial_collection(const void* p) {
1044   assert(is_in_reserved(p) || p == NULL,
1045     "Does not work if address is non-null and outside of the heap");
1046   return p < _young_gen->reserved().end() && p != NULL;
1047 }
1048 #endif
1049 
1050 void GenCollectedHeap::oop_iterate(OopIterateClosure* cl) {
1051   _young_gen->oop_iterate(cl);
1052   _old_gen->oop_iterate(cl);
1053 }
1054 
1055 void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
1056   _young_gen->object_iterate(cl);
1057   _old_gen->object_iterate(cl);
1058 }
1059 
1060 void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
1061   _young_gen->safe_object_iterate(cl);
1062   _old_gen->safe_object_iterate(cl);
1063 }
1064 
1065 Space* GenCollectedHeap::space_containing(const void* addr) const {
1066   Space* res = _young_gen->space_containing(addr);
1067   if (res != NULL) {
1068     return res;
1069   }
1070   res = _old_gen->space_containing(addr);
1071   assert(res != NULL, "Could not find containing space");
1072   return res;
1073 }
1074 
1075 HeapWord* GenCollectedHeap::block_start(const void* addr) const {
1076   assert(is_in_reserved(addr), "block_start of address outside of heap");
1077   if (_young_gen->is_in_reserved(addr)) {
1078     assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
1079     return _young_gen->block_start(addr);
1080   }
1081 
1082   assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
1083   assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
1084   return _old_gen->block_start(addr);
1085 }
1086 
1087 bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
1088   assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
1089   assert(block_start(addr) == addr, "addr must be a block start");
1090   if (_young_gen->is_in_reserved(addr)) {
1091     return _young_gen->block_is_obj(addr);
1092   }
1093 
1094   assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
1095   return _old_gen->block_is_obj(addr);
1096 }
1097 
1098 bool GenCollectedHeap::supports_tlab_allocation() const {
1099   assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
1100   return _young_gen->supports_tlab_allocation();
1101 }
1102 
1103 size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
1104   assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
1105   if (_young_gen->supports_tlab_allocation()) {
1106     return _young_gen->tlab_capacity();
1107   }
1108   return 0;
1109 }
1110 
1111 size_t GenCollectedHeap::tlab_used(Thread* thr) const {
1112   assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
1113   if (_young_gen->supports_tlab_allocation()) {
1114     return _young_gen->tlab_used();
1115   }
1116   return 0;
1117 }
1118 
1119 size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
1120   assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
1121   if (_young_gen->supports_tlab_allocation()) {
1122     return _young_gen->unsafe_max_tlab_alloc();
1123   }
1124   return 0;
1125 }
1126 
1127 HeapWord* GenCollectedHeap::allocate_new_tlab(size_t min_size,
1128                                               size_t requested_size,
1129                                               size_t* actual_size) {
1130   bool gc_overhead_limit_was_exceeded;
1131   HeapWord* result = mem_allocate_work(requested_size /* size */,
1132                                        true /* is_tlab */,
1133                                        &gc_overhead_limit_was_exceeded);
1134   if (result != NULL) {
1135     *actual_size = requested_size;
1136   }
1137 
1138   return result;
1139 }
1140 
// Requires "*prev_ptr" to be non-NULL.  Deletes and returns a block of minimal size
// from the list headed by "*prev_ptr".
1143 static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
1144   bool first = true;
1145   size_t min_size = 0;   // "first" makes this conceptually infinite.
1146   ScratchBlock **smallest_ptr, *smallest;
1147   ScratchBlock  *cur = *prev_ptr;
1148   while (cur) {
1149     assert(*prev_ptr == cur, "just checking");
1150     if (first || cur->num_words < min_size) {
1151       smallest_ptr = prev_ptr;
1152       smallest     = cur;
1153       min_size     = smallest->num_words;
1154       first        = false;
1155     }
1156     prev_ptr = &cur->next;
1157     cur     =  cur->next;
1158   }
1159   smallest      = *smallest_ptr;
1160   *smallest_ptr = smallest->next;
1161   return smallest;
1162 }
1163 
// Sort the scratch block list headed by "list" into decreasing size order,
// and set "list" to the result.
1166 static void sort_scratch_list(ScratchBlock*& list) {
1167   ScratchBlock* sorted = NULL;
1168   ScratchBlock* unsorted = list;
1169   while (unsorted) {
1170     ScratchBlock *smallest = removeSmallestScratch(&unsorted);
1171     smallest->next  = sorted;
1172     sorted          = smallest;
1173   }
1174   list = sorted;
1175 }
1176 
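// Gather scratch space offered by both generations on behalf of "requestor",
// returned as a list sorted by decreasing block size.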
1177 ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
1178                                                size_t max_alloc_words) {
1179   ScratchBlock* res = NULL;
1180   _young_gen->contribute_scratch(res, requestor, max_alloc_words);
1181   _old_gen->contribute_scratch(res, requestor, max_alloc_words);
1182   sort_scratch_list(res);
1183   return res;
1184 }
1185 
1186 void GenCollectedHeap::release_scratch() {
1187   _young_gen->reset_scratch();
1188   _old_gen->reset_scratch();
1189 }
1190 
1191 class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
1192   void do_generation(Generation* gen) {
1193     gen->prepare_for_verify();
1194   }
1195 };
1196 
1197 void GenCollectedHeap::prepare_for_verify() {
1198   ensure_parsability(false);        // no need to retire TLABs
1199   GenPrepareForVerifyClosure blk;
1200   generation_iterate(&blk, false);
1201 }
1202 
1203 void GenCollectedHeap::generation_iterate(GenClosure* cl,
1204                                           bool old_to_young) {
1205   if (old_to_young) {
1206     cl->do_generation(_old_gen);
1207     cl->do_generation(_young_gen);
1208   } else {
1209     cl->do_generation(_young_gen);
1210     cl->do_generation(_old_gen);
1211   }
1212 }
1213 
1214 bool GenCollectedHeap::is_maximal_no_gc() const {
1215   return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
1216 }
1217 
1218 void GenCollectedHeap::save_marks() {
1219   _young_gen->save_marks();
1220   _old_gen->save_marks();
1221 }
1222 
1223 GenCollectedHeap* GenCollectedHeap::heap() {
1224   CollectedHeap* heap = Universe::heap();
1225   assert(heap != NULL, "Uninitialized access to GenCollectedHeap::heap()");
1226   assert(heap->kind() == CollectedHeap::Serial ||
1227          heap->kind() == CollectedHeap::CMS, "Invalid name");
1228   return (GenCollectedHeap*) heap;
1229 }
1230 
1231 #if INCLUDE_SERIALGC
1232 void GenCollectedHeap::prepare_for_compaction() {
1233   // Start by compacting into same gen.
1234   CompactPoint cp(_old_gen);
1235   _old_gen->prepare_for_compaction(&cp);
1236   _young_gen->prepare_for_compaction(&cp);
1237 }
1238 #endif // INCLUDE_SERIALGC
1239 
1240 void GenCollectedHeap::verify(VerifyOption option /* ignored */) {
1241   log_debug(gc, verify)("%s", _old_gen->name());
1242   _old_gen->verify();
1243 
  log_debug(gc, verify)("%s", _young_gen->name());
1245   _young_gen->verify();
1246 
1247   log_debug(gc, verify)("RemSet");
1248   rem_set()->verify();
1249 }
1250 
1251 void GenCollectedHeap::print_on(outputStream* st) const {
1252   _young_gen->print_on(st);
1253   _old_gen->print_on(st);
1254   MetaspaceUtils::print_on(st);
1255 }
1256 
1257 void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
1258 }
1259 
1260 void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
1261 }
1262 
1263 void GenCollectedHeap::print_tracing_info() const {
1264   if (log_is_enabled(Debug, gc, heap, exit)) {
1265     LogStreamHandle(Debug, gc, heap, exit) lsh;
1266     _young_gen->print_summary_info_on(&lsh);
1267     _old_gen->print_summary_info_on(&lsh);
1268   }
1269 }
1270 
1271 void GenCollectedHeap::print_heap_change(size_t young_prev_used, size_t old_prev_used) const {
  log_info(gc, heap)("%s: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
                     _young_gen->short_name(), young_prev_used / K, _young_gen->used() / K, _young_gen->capacity() / K);
  log_info(gc, heap)("%s: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
                     _old_gen->short_name(), old_prev_used / K, _old_gen->used() / K, _old_gen->capacity() / K);
1276 }
1277 
1278 class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
1279  private:
1280   bool _full;
1281  public:
1282   void do_generation(Generation* gen) {
1283     gen->gc_prologue(_full);
1284   }
1285   GenGCPrologueClosure(bool full) : _full(full) {};
1286 };
1287 
1288 void GenCollectedHeap::gc_prologue(bool full) {
1289   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
1290 
1291   // Fill TLAB's and such
1292   ensure_parsability(true);   // retire TLABs
1293 
1294   // Walk generations
1295   GenGCPrologueClosure blk(full);
1296   generation_iterate(&blk, false);  // not old-to-young.
1297 };
1298 
1299 class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
1300  private:
1301   bool _full;
1302  public:
1303   void do_generation(Generation* gen) {
1304     gen->gc_epilogue(_full);
1305   }
1306   GenGCEpilogueClosure(bool full) : _full(full) {};
1307 };
1308 
1309 void GenCollectedHeap::gc_epilogue(bool full) {
1310 #if COMPILER2_OR_JVMCI
1311   assert(DerivedPointerTable::is_empty(), "derived pointer present");
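  // Sanity check for inline contiguous allocation: the distance from the young
  // generation's end to the top of the address space must exceed
  // FastAllocateSizeLimit, so compiled-code bump-pointer allocation cannot
  // wrap around the address range.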
1312   size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
1313   guarantee(is_client_compilation_mode_vm() || actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
1314 #endif // COMPILER2_OR_JVMCI
1315 
1316   resize_all_tlabs();
1317 
1318   GenGCEpilogueClosure blk(full);
1319   generation_iterate(&blk, false);  // not old-to-young.
1320 
1321   if (!CleanChunkPoolAsync) {
1322     Chunk::clean_chunk_pool();
1323   }
1324 
1325   MetaspaceCounters::update_performance_counters();
1326   CompressedClassSpaceCounters::update_performance_counters();
1327 };
1328 
1329 #ifndef PRODUCT
1330 class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
1331  private:
1332  public:
1333   void do_generation(Generation* gen) {
1334     gen->record_spaces_top();
1335   }
1336 };
1337 
1338 void GenCollectedHeap::record_gen_tops_before_GC() {
1339   if (ZapUnusedHeapArea) {
1340     GenGCSaveTopsBeforeGCClosure blk;
1341     generation_iterate(&blk, false);  // not old-to-young.
1342   }
1343 }
1344 #endif  // not PRODUCT
1345 
1346 class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
1347  public:
1348   void do_generation(Generation* gen) {
1349     gen->ensure_parsability();
1350   }
1351 };
1352 
1353 void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
1354   CollectedHeap::ensure_parsability(retire_tlabs);
1355   GenEnsureParsabilityClosure ep_cl;
1356   generation_iterate(&ep_cl, false);
1357 }
1358 
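// Last-resort handling for an object that could not be promoted (typically
// during a young collection): expand the old generation and copy the object
// there; returns NULL if even that fails.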
1359 oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
1360                                               oop obj,
1361                                               size_t obj_size) {
1362   guarantee(old_gen == _old_gen, "We only get here with an old generation");
1363   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1364   HeapWord* result = NULL;
1365 
1366   result = old_gen->expand_and_allocate(obj_size, false);
1367 
1368   if (result != NULL) {
1369     Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
1370   }
1371   return oop(result);
1372 }
1373 
1374 class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
1375   jlong _time;   // in ms
1376   jlong _now;    // in ms
1377 
1378  public:
1379   GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }
1380 
1381   jlong time() { return _time; }
1382 
1383   void do_generation(Generation* gen) {
1384     _time = MIN2(_time, gen->time_of_last_gc(_now));
1385   }
1386 };
1387 
1388 jlong GenCollectedHeap::millis_since_last_gc() {
1389   // javaTimeNanos() is guaranteed to be monotonically non-decreasing
1390   // provided the underlying platform provides such a time source
1391   // (and it is bug free). So we still have to guard against getting
1392   // back a time later than 'now'.
1393   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
1394   GenTimeOfLastGCClosure tolgc_cl(now);
1395   // iterate over generations getting the oldest
1396   // time that a generation was collected
1397   generation_iterate(&tolgc_cl, false);
1398 
1399   jlong retVal = now - tolgc_cl.time();
1400   if (retVal < 0) {
1401     log_warning(gc)("millis_since_last_gc() would return : " JLONG_FORMAT
1402        ". returning zero instead.", retVal);
1403     return 0;
1404   }
1405   return retVal;
1406 }