/*
 * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "gc/shared/workgroup.hpp"
#include "memory/filemap.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/macros.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/vmError.hpp"
#if INCLUDE_ALL_GCS
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/cms/vmCMSOperations.hpp"
#endif // INCLUDE_ALL_GCS

NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)

// The set of potentially parallel tasks in root scanning.
enum GCH_strong_roots_tasks {
  GCH_PS_Universe_oops_do,
  GCH_PS_JNIHandles_oops_do,
  GCH_PS_ObjectSynchronizer_oops_do,
  GCH_PS_FlatProfiler_oops_do,
  GCH_PS_Management_oops_do,
  GCH_PS_SystemDictionary_oops_do,
  GCH_PS_ClassLoaderDataGraph_oops_do,
  GCH_PS_jvmti_oops_do,
  GCH_PS_CodeCache_oops_do,
  GCH_PS_younger_gens,
  // Leave this one last.
  GCH_PS_NumElements
};
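
// Each task id above is used as a claim token with the SubTasksDone instance
// created in the constructor (sized by GCH_PS_NumElements): in process_roots()
// the first GC worker to claim an id processes that root group, so each group
// is scanned exactly once even when root scanning runs on multiple threads.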

GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
  CollectedHeap(),
  _rem_set(NULL),
  _gen_policy(policy),
  _process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
  _full_collections_completed(0)
{
  assert(policy != NULL, "Sanity check");
  if (UseConcMarkSweepGC) {
    _workers = new WorkGang("GC Thread", ParallelGCThreads,
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
    _workers->initialize_workers();
  } else {
    // Serial GC does not use workers.
    _workers = NULL;
  }
}

jint GenCollectedHeap::initialize() {
  CollectedHeap::pre_initialize();

  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  // Allocate space for the heap.

  char* heap_address;
  ReservedSpace heap_rs;

  size_t heap_alignment = collector_policy()->heap_alignment();

  heap_address = allocate(heap_alignment, &heap_rs);

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));

  _rem_set = collector_policy()->create_rem_set(reserved_region());
  set_barrier_set(rem_set()->bs());

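  // Carve the reserved space in address order: the young generation is
  // allocated from the first part and the old generation from the remainder,
  // so young sits below old in the address space (is_in_young() depends on
  // this contiguous layout).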
  ReservedSpace young_rs = heap_rs.first_part(gen_policy()->young_gen_spec()->max_size(), false, false);
  _young_gen = gen_policy()->young_gen_spec()->init(young_rs, rem_set());
  heap_rs = heap_rs.last_part(gen_policy()->young_gen_spec()->max_size());

  ReservedSpace old_rs = heap_rs.first_part(gen_policy()->old_gen_spec()->max_size(), false, false);
  _old_gen = gen_policy()->old_gen_spec()->init(old_rs, rem_set());
  clear_incremental_collection_failed();

#if INCLUDE_ALL_GCS
  // If we are running CMS, create the collector responsible
  // for collecting the CMS generations.
  if (collector_policy()->is_concurrent_mark_sweep_policy()) {
    bool success = create_cms_collector();
    if (!success) return JNI_ENOMEM;
  }
#endif // INCLUDE_ALL_GCS

  return JNI_OK;
}

char* GenCollectedHeap::allocate(size_t alignment,
                                 ReservedSpace* heap_rs) {
  // Now figure out the total size.
  const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
  assert(alignment % pageSize == 0, "Must be");

  GenerationSpec* young_spec = gen_policy()->young_gen_spec();
  GenerationSpec* old_spec = gen_policy()->old_gen_spec();

  // Check for overflow.
  size_t total_reserved = young_spec->max_size() + old_spec->max_size();
  if (total_reserved < young_spec->max_size()) {
    vm_exit_during_initialization("The size of the object heap + VM data exceeds "
                                  "the maximum representable size");
  }
  assert(total_reserved % alignment == 0,
         "Gen size; total_reserved=" SIZE_FORMAT ", alignment="
         SIZE_FORMAT, total_reserved, alignment);

  *heap_rs = Universe::reserve_heap(total_reserved, alignment);

  os::trace_page_sizes("Heap",
                       collector_policy()->min_heap_byte_size(),
                       total_reserved,
                       alignment,
                       heap_rs->base(),
                       heap_rs->size());

  return heap_rs->base();
}

void GenCollectedHeap::post_initialize() {
  ref_processing_init();
  assert((_young_gen->kind() == Generation::DefNew) ||
         (_young_gen->kind() == Generation::ParNew),
    "Wrong youngest generation type");
  DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;

  assert(_old_gen->kind() == Generation::ConcurrentMarkSweep ||
         _old_gen->kind() == Generation::MarkSweepCompact,
    "Wrong generation kind");

  _gen_policy->initialize_size_policy(def_new_gen->eden()->capacity(),
                                      _old_gen->capacity(),
                                      def_new_gen->from()->capacity());
  _gen_policy->initialize_gc_policy_counters();
}

void GenCollectedHeap::ref_processing_init() {
  _young_gen->ref_processor_init();
  _old_gen->ref_processor_init();
}

size_t GenCollectedHeap::capacity() const {
  return _young_gen->capacity() + _old_gen->capacity();
}

size_t GenCollectedHeap::used() const {
  return _young_gen->used() + _old_gen->used();
}

void GenCollectedHeap::save_used_regions() {
  _old_gen->save_used_region();
  _young_gen->save_used_region();
}

size_t GenCollectedHeap::max_capacity() const {
  return _young_gen->max_capacity() + _old_gen->max_capacity();
}

// Update the _full_collections_completed counter
// at the end of a stop-world full GC.
unsigned int GenCollectedHeap::update_full_collections_completed() {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert(_full_collections_completed <= _total_full_collections,
         "Can't complete more collections than were started");
  _full_collections_completed = _total_full_collections;
  ml.notify_all();
  return _full_collections_completed;
}

// Update the _full_collections_completed counter, as appropriate,
// at the end of a concurrent GC cycle. Note the conditional update
// below to allow this method to be called by a concurrent collector
// without synchronizing in any manner with the VM thread (which
// may already have initiated a STW full collection "concurrently").
unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert((_full_collections_completed <= _total_full_collections) &&
         (count <= _total_full_collections),
         "Can't complete more collections than were started");
  if (count > _full_collections_completed) {
    _full_collections_completed = count;
    ml.notify_all();
  }
  return _full_collections_completed;
}

#ifndef PRODUCT
// Override of memory state checking method in CollectedHeap:
// Some collectors (CMS for example) can't have badHeapWordVal written
// in the first two words of an object. (For instance, in the case of
// CMS these words hold state used to synchronize between certain
// (concurrent) GC steps and direct allocating mutators.)
// The skip_header_HeapWords() method below allows us to skip
// over the requisite number of HeapWords. Note that (for
// generational collectors) this means that those many words are
// skipped in each object, irrespective of the generation in which
// that object lives. The resultant loss of precision seems to be
// harmless and the pain of avoiding that imprecision appears somewhat
// higher than we are prepared to pay for such rudimentary debugging
// support.
void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
                                                         size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    // We are asked to check a size in HeapWords,
    // but the memory is mangled in juint words.
    juint* start = (juint*) (addr + skip_header_HeapWords());
    juint* end   = (juint*) (addr + size);
    for (juint* slot = start; slot < end; slot += 1) {
      assert(*slot == badHeapWordVal,
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif

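// Allocation policy: try the young generation first and, unless first_only
// is set, fall back to the old generation if the young generation cannot
// (or should not) satisfy the request.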
HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                               bool is_tlab,
                                               bool first_only) {
  HeapWord* res = NULL;

  if (_young_gen->should_allocate(size, is_tlab)) {
    res = _young_gen->allocate(size, is_tlab);
    if (res != NULL || first_only) {
      return res;
    }
  }

  if (_old_gen->should_allocate(size, is_tlab)) {
    res = _old_gen->allocate(size, is_tlab);
  }

  return res;
}

HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool* gc_overhead_limit_was_exceeded) {
  return gen_policy()->mem_allocate_work(size,
                                         false /* is_tlab */,
                                         gc_overhead_limit_was_exceeded);
}

bool GenCollectedHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_metadata_GC_clear_soft_refs ||
         _gc_cause == GCCause::_wb_full_gc;
}

bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  if (!UseConcMarkSweepGC) {
    return false;
  }

  switch (cause) {
    case GCCause::_gc_locker:           return GCLockerInvokesConcurrent;
    case GCCause::_java_lang_system_gc:
    case GCCause::_dcmd_gc_run:         return ExplicitGCInvokesConcurrent;
    default:                            return false;
  }
}

void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
                                          bool is_tlab, bool run_verification, bool clear_soft_refs,
                                          bool restore_marks_for_biased_locking) {
  FormatBuffer<> title("Collect gen: %s", gen->short_name());
  GCTraceTime(Trace, gc, phases) t1(title);
  TraceCollectorStats tcs(gen->counters());
  TraceMemoryManagerStats tmms(gen->kind(), gc_cause());

  gen->stat_record()->invocations++;
  gen->stat_record()->accumulated_time.start();

  // Must be done anew before each collection because
  // a previous collection will do mangling and will
  // change top of some spaces.
  record_gen_tops_before_GC();

  log_trace(gc)("%s invoke=%d size=" SIZE_FORMAT,
                heap()->is_young_gen(gen) ? "Young" : "Old",
                gen->stat_record()->invocations,
                size * HeapWordSize);

  if (run_verification && VerifyBeforeGC) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify("Before GC");
  }
  COMPILER2_PRESENT(DerivedPointerTable::clear());

  if (restore_marks_for_biased_locking) {
    // We perform this mark word preservation work lazily
    // because it's only at this point that we know whether we
    // absolutely have to do it; we want to avoid doing it for
    // scavenge-only collections where it's unnecessary
    BiasedLocking::preserve_marks();
  }

  // Do collection work
  {
    // Note on ref discovery: For what appear to be historical reasons,
    // GCH enables and disables (by enqueueing) refs discovery.
    // In the future this should be moved into the generation's
    // collect method so that ref discovery and enqueueing concerns
    // are local to a generation. The collect method could return
    // an appropriate indication in the case that notification on
    // the ref lock was needed. This will make the treatment of
    // weak refs more uniform (and indeed remove such concerns
    // from GCH). XXX

    HandleMark hm;  // Discard invalid handles created during gc
    save_marks();   // save marks for all gens
    // We want to discover references, but not process them yet.
    // This mode is disabled in process_discovered_references if the
    // generation does some collection work, or in
    // enqueue_discovered_references if the generation returns
    // without doing any work.
    ReferenceProcessor* rp = gen->ref_processor();
    // If the discovery of ("weak") refs in this generation is
    // atomic wrt other collectors in this configuration, we
    // are guaranteed to have empty discovered ref lists.
    if (rp->discovery_is_atomic()) {
      rp->enable_discovery();
      rp->setup_policy(clear_soft_refs);
    } else {
      // collect() below will enable discovery as appropriate
    }
    gen->collect(full, clear_soft_refs, size, is_tlab);
    if (!rp->enqueuing_is_done()) {
      rp->enqueue_discovered_references();
    } else {
      rp->set_enqueuing_is_done(false);
    }
    rp->verify_no_references_recorded();
  }

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

  gen->stat_record()->accumulated_time.stop();

  update_gc_stats(gen, full);

  if (run_verification && VerifyAfterGC) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify("After GC");
  }
}

void GenCollectedHeap::do_collection(bool           full,
                                     bool           clear_all_soft_refs,
                                     size_t         size,
                                     bool           is_tlab,
                                     GenerationType max_generation) {
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(my_thread->is_VM_thread() ||
         my_thread->is_ConcurrentGC_thread(),
         "incorrect thread type capability");
  assert(Heap_lock->is_locked(),
         "the requesting thread should have the Heap_lock");
  guarantee(!is_gc_active(), "collection is not reentrant");

  if (GCLocker::check_active_before_gc()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  GCIdMarkAndRestore gc_id_mark;

  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                          collector_policy()->should_clear_all_soft_refs();

  ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());

  const size_t metadata_prev_used = MetaspaceAux::used_bytes();

  print_heap_before_gc();

  {
    FlagSetting fl(_is_gc_active, true);

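    // complete:           both generations will be collected (a full GC up to
    //                     the old generation).
    // old_collects_young: in a complete collection the old gen collector also
    //                     covers the young gen, unless ScavengeBeforeFullGC
    //                     requests a separate young collection first.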
    bool complete = full && (max_generation == OldGen);
    bool old_collects_young = complete && !ScavengeBeforeFullGC;
    bool do_young_collection = !old_collects_young && _young_gen->should_collect(full, size, is_tlab);

    FormatBuffer<> gc_string("%s", "Pause ");
    if (do_young_collection) {
      gc_string.append("Young");
    } else {
      gc_string.append("Full");
    }

    GCTraceCPUTime tcpu;
    GCTraceTime(Info, gc) t(gc_string, NULL, gc_cause(), true);

    gc_prologue(complete);
    increment_total_collections(complete);

    size_t young_prev_used = _young_gen->used();
    size_t old_prev_used = _old_gen->used();

    bool run_verification = total_collections() >= VerifyGCStartAt;

    bool prepared_for_verification = false;
    bool collected_old = false;

    if (do_young_collection) {
      if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
        prepare_for_verify();
        prepared_for_verification = true;
      }

      collect_generation(_young_gen,
                         full,
                         size,
                         is_tlab,
                         run_verification && VerifyGCLevel <= 0,
                         do_clear_all_soft_refs,
                         false);

      if (size > 0 && (!is_tlab || _young_gen->supports_tlab_allocation()) &&
          size * HeapWordSize <= _young_gen->unsafe_max_alloc_nogc()) {
        // Allocation request was met by young GC.
        size = 0;
      }
    }

    bool must_restore_marks_for_biased_locking = false;

    if (max_generation == OldGen && _old_gen->should_collect(full, size, is_tlab)) {
      if (!complete) {
        // The full_collections increment was missed above.
        increment_total_full_collections();
      }

      if (!prepared_for_verification && run_verification &&
          VerifyGCLevel <= 1 && VerifyBeforeGC) {
        prepare_for_verify();
      }

      if (do_young_collection) {
        // We did a young GC. Need a new GC id for the old GC.
        GCIdMarkAndRestore gc_id_mark;
        GCTraceTime(Info, gc) t("Pause Full", NULL, gc_cause(), true);
        collect_generation(_old_gen, full, size, is_tlab, run_verification && VerifyGCLevel <= 1, do_clear_all_soft_refs, true);
      } else {
        // No young GC done. Use the same GC id as was set up earlier in this method.
        collect_generation(_old_gen, full, size, is_tlab, run_verification && VerifyGCLevel <= 1, do_clear_all_soft_refs, true);
      }

      must_restore_marks_for_biased_locking = true;
      collected_old = true;
    }

    // Update "complete" boolean wrt what actually transpired --
    // for instance, a promotion failure could have led to
    // a whole heap collection.
    complete = complete || collected_old;

    print_heap_change(young_prev_used, old_prev_used);
    MetaspaceAux::print_metaspace_change(metadata_prev_used);

    // Adjust generation sizes.
    if (collected_old) {
      _old_gen->compute_new_size();
    }
    _young_gen->compute_new_size();

    if (complete) {
      // Delete metaspaces for unloaded class loaders and clean up loader_data graph
      ClassLoaderDataGraph::purge();
      MetaspaceAux::verify_metrics();
      // Resize the metaspace capacity after full collections
      MetaspaceGC::compute_new_size();
      update_full_collections_completed();
    }

    // Track memory usage and detect low memory after GC finishes
    MemoryService::track_memory_usage();

    gc_epilogue(complete);

    if (must_restore_marks_for_biased_locking) {
      BiasedLocking::restore_marks();
    }
  }

  print_heap_after_gc();

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif
}

HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  return gen_policy()->satisfy_failed_allocation(size, is_tlab);
}

#ifdef ASSERT
class AssertNonScavengableClosure: public OopClosure {
public:
  virtual void do_oop(oop* p) {
    assert(!GenCollectedHeap::heap()->is_in_partial_collection(*p),
      "Referent should not be scavengable.");
  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertNonScavengableClosure assert_is_non_scavengable_closure;
#endif

void GenCollectedHeap::process_roots(StrongRootsScope* scope,
                                     ScanningOption so,
                                     OopClosure* strong_roots,
                                     OopClosure* weak_roots,
                                     CLDClosure* strong_cld_closure,
                                     CLDClosure* weak_cld_closure,
                                     CodeBlobToOopClosure* code_roots) {
  // General roots.
  assert(Threads::thread_claim_parity() != 0, "must have called prologue code");
  assert(code_roots != NULL, "code root closure should always be set");
  // _n_termination for _process_strong_tasks should be set upstream,
  // in a method not running in a GC worker.  Otherwise the GC worker
  // could be trying to change the termination condition while the task
  // is executing in another GC worker.

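  // Each root group below is guarded by an is_task_claimed() check: the
  // first worker thread to claim a task id scans that group, the rest skip it.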
  if (!_process_strong_tasks->is_task_claimed(GCH_PS_ClassLoaderDataGraph_oops_do)) {
    ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);
  }

  // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
  CodeBlobToOopClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;

  bool is_par = scope->n_threads() > 1;
  Threads::possibly_parallel_oops_do(is_par, strong_roots, roots_from_code_p);

  if (!_process_strong_tasks->is_task_claimed(GCH_PS_Universe_oops_do)) {
    Universe::oops_do(strong_roots);
  }
  // Global (strong) JNI handles
  if (!_process_strong_tasks->is_task_claimed(GCH_PS_JNIHandles_oops_do)) {
    JNIHandles::oops_do(strong_roots);
  }

  if (!_process_strong_tasks->is_task_claimed(GCH_PS_ObjectSynchronizer_oops_do)) {
    ObjectSynchronizer::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(GCH_PS_FlatProfiler_oops_do)) {
    FlatProfiler::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(GCH_PS_Management_oops_do)) {
    Management::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(GCH_PS_jvmti_oops_do)) {
    JvmtiExport::oops_do(strong_roots);
  }

  if (!_process_strong_tasks->is_task_claimed(GCH_PS_SystemDictionary_oops_do)) {
    SystemDictionary::roots_oops_do(strong_roots, weak_roots);
  }

  // All threads execute the following. A specific chunk of buckets
  // from the StringTable are the individual tasks.
  if (weak_roots != NULL) {
    if (is_par) {
      StringTable::possibly_parallel_oops_do(weak_roots);
    } else {
      StringTable::oops_do(weak_roots);
    }
  }

  if (!_process_strong_tasks->is_task_claimed(GCH_PS_CodeCache_oops_do)) {
    if (so & SO_ScavengeCodeCache) {
      assert(code_roots != NULL, "must supply closure for code cache");

      // We only visit parts of the CodeCache when scavenging.
      CodeCache::scavenge_root_nmethods_do(code_roots);
    }
    if (so & SO_AllCodeCache) {
      assert(code_roots != NULL, "must supply closure for code cache");

      // CMSCollector uses this to do intermediate-strength collections.
      // We scan the entire code cache, since CodeCache::do_unloading is not called.
      CodeCache::blobs_do(code_roots);
    }
    // Verify that the code cache contents are not subject to
    // movement by a scavenging collection.
    DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
    DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
  }
}

void GenCollectedHeap::young_process_roots(StrongRootsScope* scope,
                                           OopsInGenClosure* root_closure,
                                           OopsInGenClosure* old_gen_closure,
                                           CLDClosure* cld_closure) {
  MarkingCodeBlobClosure mark_code_closure(root_closure, CodeBlobToOopClosure::FixRelocations);

  process_roots(scope, SO_ScavengeCodeCache, root_closure, root_closure,
                cld_closure, cld_closure, &mark_code_closure);

  if (!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
    root_closure->reset_generation();
  }

  // When collection is parallel, all threads get to cooperate to do
  // old generation scanning.
  old_gen_closure->set_generation(_old_gen);
  rem_set()->younger_refs_iterate(_old_gen, old_gen_closure, scope->n_threads());
  old_gen_closure->reset_generation();

  _process_strong_tasks->all_tasks_completed(scope->n_threads());
}

void GenCollectedHeap::conc_process_roots(StrongRootsScope* scope,
                                         bool young_gen_as_roots,
                                         ScanningOption so,
                                         bool only_strong_roots,
                                         OopsInGenClosure* root_closure,
                                         CLDClosure* cld_closure) {
  MarkingCodeBlobClosure mark_code_closure(root_closure, !CodeBlobToOopClosure::FixRelocations);
  OopsInGenClosure* weak_roots = only_strong_roots ? NULL : root_closure;
  CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;

  process_roots(scope, so, root_closure, weak_roots, cld_closure, weak_cld_closure, &mark_code_closure);

  if (young_gen_as_roots &&
      !_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
    root_closure->set_generation(_young_gen);
    _young_gen->oop_iterate(root_closure);
    root_closure->reset_generation();
  }

  _process_strong_tasks->all_tasks_completed(scope->n_threads());
}

void GenCollectedHeap::full_process_roots(StrongRootsScope* scope,
                                          bool only_strong_roots,
                                          ScanningOption so,
                                          bool is_adjust_phase,
                                          OopsInGenClosure* root_closure,
                                          CLDClosure* cld_closure) {
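  // In the adjust phase of a full collection even weak references must be
  // updated to point at the objects' new locations, so weak oop roots are
  // processed with the same closure as strong roots.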
  MarkingCodeBlobClosure mark_code_closure(root_closure, is_adjust_phase);
  OopsInGenClosure* weak_roots = is_adjust_phase ? root_closure : NULL;
  CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;

  process_roots(scope, so, root_closure, weak_roots, cld_closure, weak_cld_closure, &mark_code_closure);

  _process_strong_tasks->all_tasks_completed(scope->n_threads());
}

void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
  JNIHandles::weak_oops_do(root_closure);
  _young_gen->ref_processor()->weak_oops_do(root_closure);
  _old_gen->ref_processor()->weak_oops_do(root_closure);
}

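// The macro below stamps out one oop_since_save_marks_iterate overload per
// specialized closure type; nv_suffix selects the non-virtual ("_nv") variant
// of the generations' iterate methods, avoiding virtual dispatch per oop.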
#define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
void GenCollectedHeap::                                                 \
oop_since_save_marks_iterate(GenerationType gen,                        \
                             OopClosureType* cur,                       \
                             OopClosureType* older) {                   \
  if (gen == YoungGen) {                                                \
    _young_gen->oop_since_save_marks_iterate##nv_suffix(cur);           \
    _old_gen->oop_since_save_marks_iterate##nv_suffix(older);           \
  } else {                                                              \
    _old_gen->oop_since_save_marks_iterate##nv_suffix(cur);             \
  }                                                                     \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN

bool GenCollectedHeap::no_allocs_since_save_marks() {
  return _young_gen->no_allocs_since_save_marks() &&
         _old_gen->no_allocs_since_save_marks();
}

bool GenCollectedHeap::supports_inline_contig_alloc() const {
  return _young_gen->supports_inline_contig_alloc();
}

HeapWord** GenCollectedHeap::top_addr() const {
  return _young_gen->top_addr();
}

HeapWord** GenCollectedHeap::end_addr() const {
  return _young_gen->end_addr();
}

// public collection interfaces

void GenCollectedHeap::collect(GCCause::Cause cause) {
  if (should_do_concurrent_full_gc(cause)) {
#if INCLUDE_ALL_GCS
    // Mostly concurrent full collection.
    collect_mostly_concurrent(cause);
#else  // INCLUDE_ALL_GCS
    ShouldNotReachHere();
#endif // INCLUDE_ALL_GCS
  } else if (cause == GCCause::_wb_young_gc) {
    // Young collection for the WhiteBox API.
    collect(cause, YoungGen);
  } else {
#ifdef ASSERT
    if (cause == GCCause::_scavenge_alot) {
      // Young collection only.
      collect(cause, YoungGen);
    } else {
      // Stop-the-world full collection.
      collect(cause, OldGen);
    }
#else
    // Stop-the-world full collection.
    collect(cause, OldGen);
#endif
  }
}

void GenCollectedHeap::collect(GCCause::Cause cause, GenerationType max_generation) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  MutexLocker ml(Heap_lock);
  collect_locked(cause, max_generation);
}

void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
  // The caller has the Heap_lock
  assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
  collect_locked(cause, OldGen);
}

// this is the private collection interface
// The Heap_lock is expected to be held on entry.

void GenCollectedHeap::collect_locked(GCCause::Cause cause, GenerationType max_generation) {
  // Read the GC count while holding the Heap_lock
  unsigned int gc_count_before      = total_collections();
  unsigned int full_gc_count_before = total_full_collections();
  {
    MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_generation);
    VMThread::execute(&op);
  }
}

#if INCLUDE_ALL_GCS
bool GenCollectedHeap::create_cms_collector() {

  assert(_old_gen->kind() == Generation::ConcurrentMarkSweep,
         "Unexpected generation kinds");
  // Skip two header words in the block content verification
  NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
  assert(_gen_policy->is_concurrent_mark_sweep_policy(), "Unexpected policy type");
  CMSCollector* collector =
    new CMSCollector((ConcurrentMarkSweepGeneration*)_old_gen,
                     _rem_set,
                     _gen_policy->as_concurrent_mark_sweep_policy());

  if (collector == NULL || !collector->completed_initialization()) {
    if (collector) {
      delete collector;  // Be nice in embedded situation
    }
    vm_shutdown_during_initialization("Could not create CMS collector");
    return false;
  }
  return true;  // success
}

void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");

  MutexLocker ml(Heap_lock);
  // Read the GC counts while holding the Heap_lock
  unsigned int full_gc_count_before = total_full_collections();
  unsigned int gc_count_before      = total_collections();
  {
    MutexUnlocker mu(Heap_lock);
    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
    VMThread::execute(&op);
  }
}
#endif // INCLUDE_ALL_GCS

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_full_collection(clear_all_soft_refs, OldGen);
}

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                          GenerationType last_generation) {
  GenerationType local_last_generation;
  if (!incremental_collection_will_fail(false /* don't consult_young */) &&
      gc_cause() == GCCause::_gc_locker) {
    local_last_generation = YoungGen;
  } else {
    local_last_generation = last_generation;
  }

  do_collection(true,                   // full
                clear_all_soft_refs,    // clear_all_soft_refs
                0,                      // size
                false,                  // is_tlab
                local_last_generation); // last_generation
  // Hack XXX FIX ME !!!
  // A scavenge may not have been attempted, or may have
  // been attempted and failed, because the old gen was too full
  if (local_last_generation == YoungGen && gc_cause() == GCCause::_gc_locker &&
      incremental_collection_will_fail(false /* don't consult_young */)) {
    log_debug(gc, jni)("GC locker: Trying a full collection because scavenge failed");
    // This time allow the old gen to be collected as well
    do_collection(true,                // full
                  clear_all_soft_refs, // clear_all_soft_refs
                  0,                   // size
                  false,               // is_tlab
                  OldGen);             // last_generation
  }
}

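// Relies on the heap layout established in initialize(): the young generation
// occupies the low end of the reserved region, so comparing the address with
// the start of the old generation's reserved space is sufficient.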
bool GenCollectedHeap::is_in_young(oop p) {
  bool result = ((HeapWord*)p) < _old_gen->reserved().start();
  assert(result == _young_gen->is_in_reserved(p),
         "incorrect test - result=%d, p=" INTPTR_FORMAT, result, p2i((void*)p));
  return result;
}

// Returns "TRUE" iff "p" points into the committed areas of the heap.
bool GenCollectedHeap::is_in(const void* p) const {
  return _young_gen->is_in(p) || _old_gen->is_in(p);
}

#ifdef ASSERT
// Don't implement this by using is_in_young().  This method is used
// in some cases to check that is_in_young() is correct.
bool GenCollectedHeap::is_in_partial_collection(const void* p) {
  assert(is_in_reserved(p) || p == NULL,
    "Does not work if address is non-null and outside of the heap");
  return p < _young_gen->reserved().end() && p != NULL;
}
#endif

void GenCollectedHeap::oop_iterate_no_header(OopClosure* cl) {
  NoHeaderExtendedOopClosure no_header_cl(cl);
  oop_iterate(&no_header_cl);
}

void GenCollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
  _young_gen->oop_iterate(cl);
  _old_gen->oop_iterate(cl);
}

void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
  _young_gen->object_iterate(cl);
  _old_gen->object_iterate(cl);
}

void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
  _young_gen->safe_object_iterate(cl);
  _old_gen->safe_object_iterate(cl);
}

Space* GenCollectedHeap::space_containing(const void* addr) const {
  Space* res = _young_gen->space_containing(addr);
  if (res != NULL) {
    return res;
  }
  res = _old_gen->space_containing(addr);
  assert(res != NULL, "Could not find containing space");
  return res;
}

HeapWord* GenCollectedHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  if (_young_gen->is_in_reserved(addr)) {
    assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
    return _young_gen->block_start(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
  return _old_gen->block_start(addr);
}

size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_size of address outside of heap");
  if (_young_gen->is_in_reserved(addr)) {
    assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
    return _young_gen->block_size(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
  return _old_gen->block_size(addr);
}

bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");
  if (_young_gen->is_in_reserved(addr)) {
    return _young_gen->block_is_obj(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  return _old_gen->block_is_obj(addr);
}

bool GenCollectedHeap::supports_tlab_allocation() const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  return _young_gen->supports_tlab_allocation();
}

size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  if (_young_gen->supports_tlab_allocation()) {
    return _young_gen->tlab_capacity();
  }
  return 0;
}

size_t GenCollectedHeap::tlab_used(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  if (_young_gen->supports_tlab_allocation()) {
    return _young_gen->tlab_used();
  }
  return 0;
}

size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  if (_young_gen->supports_tlab_allocation()) {
    return _young_gen->unsafe_max_tlab_alloc();
  }
  return 0;
}

HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
  bool gc_overhead_limit_was_exceeded;
  return gen_policy()->mem_allocate_work(size /* size */,
                                         true /* is_tlab */,
                                         &gc_overhead_limit_was_exceeded);
}

// Requires "*prev_ptr" to be non-NULL.  Removes and returns a block of
// minimal size from the list headed by "*prev_ptr".
static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
  bool first = true;
  size_t min_size = 0;   // "first" makes this conceptually infinite.
  ScratchBlock **smallest_ptr, *smallest;
  ScratchBlock  *cur = *prev_ptr;
  while (cur) {
    assert(*prev_ptr == cur, "just checking");
    if (first || cur->num_words < min_size) {
      smallest_ptr = prev_ptr;
      smallest     = cur;
      min_size     = smallest->num_words;
      first        = false;
    }
    prev_ptr = &cur->next;
    cur     =  cur->next;
  }
  smallest      = *smallest_ptr;
  *smallest_ptr = smallest->next;
  return smallest;
}

// Sort the scratch block list headed by "list" into decreasing size order,
// and set "list" to the result.
static void sort_scratch_list(ScratchBlock*& list) {
  ScratchBlock* sorted = NULL;
  ScratchBlock* unsorted = list;
  while (unsorted) {
    ScratchBlock *smallest = removeSmallestScratch(&unsorted);
    smallest->next  = sorted;
    sorted          = smallest;
  }
  list = sorted;
}

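// Scratch blocks are spare chunks of space that each generation can lend out
// for the requesting generation's temporary use during a collection; both
// generations contribute here and the result is returned largest-first.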
ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                               size_t max_alloc_words) {
  ScratchBlock* res = NULL;
  _young_gen->contribute_scratch(res, requestor, max_alloc_words);
  _old_gen->contribute_scratch(res, requestor, max_alloc_words);
  sort_scratch_list(res);
  return res;
}

void GenCollectedHeap::release_scratch() {
  _young_gen->reset_scratch();
  _old_gen->reset_scratch();
}

class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
  void do_generation(Generation* gen) {
    gen->prepare_for_verify();
  }
};

void GenCollectedHeap::prepare_for_verify() {
  ensure_parsability(false);        // no need to retire TLABs
  GenPrepareForVerifyClosure blk;
  generation_iterate(&blk, false);
}

void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                          bool old_to_young) {
  if (old_to_young) {
    cl->do_generation(_old_gen);
    cl->do_generation(_young_gen);
  } else {
    cl->do_generation(_young_gen);
    cl->do_generation(_old_gen);
  }
}

bool GenCollectedHeap::is_maximal_no_gc() const {
  return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
}

void GenCollectedHeap::save_marks() {
  _young_gen->save_marks();
  _old_gen->save_marks();
}

GenCollectedHeap* GenCollectedHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to GenCollectedHeap::heap()");
  assert(heap->kind() == CollectedHeap::GenCollectedHeap, "Not a GenCollectedHeap");
  return (GenCollectedHeap*)heap;
}

void GenCollectedHeap::prepare_for_compaction() {
  // Start by compacting into same gen.
  CompactPoint cp(_old_gen);
  _old_gen->prepare_for_compaction(&cp);
  _young_gen->prepare_for_compaction(&cp);
}

void GenCollectedHeap::verify(VerifyOption option /* ignored */) {
  log_debug(gc, verify)("%s", _old_gen->name());
  _old_gen->verify();

  log_debug(gc, verify)("%s", _young_gen->name());
  _young_gen->verify();

  log_debug(gc, verify)("RemSet");
  rem_set()->verify();
}

void GenCollectedHeap::print_on(outputStream* st) const {
  _young_gen->print_on(st);
  _old_gen->print_on(st);
  MetaspaceAux::print_on(st);
}

void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  if (workers() != NULL) {
    workers()->threads_do(tc);
  }
#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::threads_do(tc);
  }
#endif // INCLUDE_ALL_GCS
}

void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    workers()->print_worker_threads_on(st);
    ConcurrentMarkSweepThread::print_all_on(st);
  }
#endif // INCLUDE_ALL_GCS
}

void GenCollectedHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    st->cr();
    CMSCollector::print_on_error(st);
  }
#endif // INCLUDE_ALL_GCS
}

void GenCollectedHeap::print_tracing_info() const {
  if (TraceYoungGenTime) {
    _young_gen->print_summary_info();
  }
  if (TraceOldGenTime) {
    _old_gen->print_summary_info();
  }
}

void GenCollectedHeap::print_heap_change(size_t young_prev_used, size_t old_prev_used) const {
  log_info(gc, heap)("%s: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
                     _young_gen->short_name(), young_prev_used / K, _young_gen->used() / K, _young_gen->capacity() / K);
  log_info(gc, heap)("%s: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
                     _old_gen->short_name(), old_prev_used / K, _old_gen->used() / K, _old_gen->capacity() / K);
}

class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_prologue(_full);
  }
  GenGCPrologueClosure(bool full) : _full(full) {}
};

void GenCollectedHeap::gc_prologue(bool full) {
  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");

  always_do_update_barrier = false;
  // Fill TLAB's and such
  CollectedHeap::accumulate_statistics_all_tlabs();
  ensure_parsability(true);   // retire TLABs

  // Walk generations
  GenGCPrologueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.
}

class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_epilogue(_full);
  }
  GenGCEpilogueClosure(bool full) : _full(full) {}
};

void GenCollectedHeap::gc_epilogue(bool full) {
#if defined(COMPILER2) || INCLUDE_JVMCI
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
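  // Guard against pointer arithmetic overflow in compiled inline allocation:
  // there must be more than FastAllocateSizeLimit words of address space
  // between the young gen's current end and the top of the address range,
  // so bumping "top" by an allocation size can never wrap around.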
  size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
  guarantee(actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
#endif /* COMPILER2 || INCLUDE_JVMCI */

  resize_all_tlabs();

  GenGCEpilogueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.

  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }

  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();

  always_do_update_barrier = UseConcMarkSweepGC;
}

#ifndef PRODUCT
class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->record_spaces_top();
  }
};

void GenCollectedHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    GenGCSaveTopsBeforeGCClosure blk;
    generation_iterate(&blk, false);  // not old-to-young.
  }
}
#endif  // not PRODUCT

class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->ensure_parsability();
  }
};

void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  GenEnsureParsabilityClosure ep_cl;
  generation_iterate(&ep_cl, false);
}

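// Called when promoting an object during a scavenge fails: as a last resort,
// try to expand the old generation and copy the object directly into it.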
oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
                                              oop obj,
                                              size_t obj_size) {
  guarantee(old_gen == _old_gen, "We only get here with an old generation");
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  HeapWord* result = NULL;

  result = old_gen->expand_and_allocate(obj_size, false);

  if (result != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
  }
  return oop(result);
}

class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
  jlong _time;   // in ms
  jlong _now;    // in ms

 public:
  GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }

  jlong time() { return _time; }

  void do_generation(Generation* gen) {
    _time = MIN2(_time, gen->time_of_last_gc(_now));
  }
};

jlong GenCollectedHeap::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  GenTimeOfLastGCClosure tolgc_cl(now);
  // iterate over generations getting the oldest
  // time that a generation was collected
  generation_iterate(&tolgc_cl, false);

  // javaTimeNanos() is guaranteed to be monotonically non-decreasing
  // provided the underlying platform provides such a time source
  // (and it is bug free). So we still have to guard against getting
  // back a time later than 'now'.
  jlong retVal = now - tolgc_cl.time();
  if (retVal < 0) {
    NOT_PRODUCT(log_warning(gc)("time warp: " JLONG_FORMAT, retVal);)
    return 0;
  }
  return retVal;
}

void GenCollectedHeap::stop() {
#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::cmst()->stop();
  }
#endif
}