/*
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "gc/shared/workgroup.hpp"
#include "memory/filemap.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/macros.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/vmError.hpp"
#if INCLUDE_ALL_GCS
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/cms/vmCMSOperations.hpp"
#endif // INCLUDE_ALL_GCS

NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)

// The set of potentially parallel tasks in root scanning.
enum GCH_strong_roots_tasks {
  GCH_PS_Universe_oops_do,
  GCH_PS_JNIHandles_oops_do,
  GCH_PS_ObjectSynchronizer_oops_do,
  GCH_PS_FlatProfiler_oops_do,
  GCH_PS_Management_oops_do,
  GCH_PS_SystemDictionary_oops_do,
  GCH_PS_ClassLoaderDataGraph_oops_do,
  GCH_PS_jvmti_oops_do,
  GCH_PS_CodeCache_oops_do,
  GCH_PS_younger_gens,
  // Leave this one last.
  GCH_PS_NumElements
};

GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
  CollectedHeap(),
  _rem_set(NULL),
  _gen_policy(policy),
  _process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
  _full_collections_completed(0)
{
  assert(policy != NULL, "Sanity check");
  if (UseConcMarkSweepGC) {
    _workers = new WorkGang("GC Thread", ParallelGCThreads,
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
    _workers->initialize_workers();
  } else {
    // Serial GC does not use workers.
    _workers = NULL;
  }
}

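// Reserve the heap, create the remembered set and barrier set, and
// carve the reserved space into the young and old generations.
// Returns JNI_OK on success, or JNI_ENOMEM if the reservation fails.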
jint GenCollectedHeap::initialize() {
  CollectedHeap::pre_initialize();

  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  // Allocate space for the heap.

  char* heap_address;
  ReservedSpace heap_rs;

  size_t heap_alignment = collector_policy()->heap_alignment();

  heap_address = allocate(heap_alignment, &heap_rs);

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));

  _rem_set = collector_policy()->create_rem_set(reserved_region());
  set_barrier_set(rem_set()->bs());

  ReservedSpace young_rs = heap_rs.first_part(gen_policy()->young_gen_spec()->max_size(), false, false);
  _young_gen = gen_policy()->young_gen_spec()->init(young_rs, rem_set());
  heap_rs = heap_rs.last_part(gen_policy()->young_gen_spec()->max_size());

  ReservedSpace old_rs = heap_rs.first_part(gen_policy()->old_gen_spec()->max_size(), false, false);
  _old_gen = gen_policy()->old_gen_spec()->init(old_rs, rem_set());
  clear_incremental_collection_failed();

#if INCLUDE_ALL_GCS
  // If we are running CMS, create the collector responsible
  // for collecting the CMS generations.
  if (collector_policy()->is_concurrent_mark_sweep_policy()) {
    bool success = create_cms_collector();
    if (!success) return JNI_ENOMEM;
  }
#endif // INCLUDE_ALL_GCS

  return JNI_OK;
}

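// Reserve one contiguous address range large enough for the maximum
// sizes of both generations, aligned as the collector policy requires.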
char* GenCollectedHeap::allocate(size_t alignment,
                                 ReservedSpace* heap_rs){
  // Now figure out the total size.
  const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
  assert(alignment % pageSize == 0, "Must be");

  GenerationSpec* young_spec = gen_policy()->young_gen_spec();
  GenerationSpec* old_spec = gen_policy()->old_gen_spec();

  // Check for overflow.
  size_t total_reserved = young_spec->max_size() + old_spec->max_size();
  if (total_reserved < young_spec->max_size()) {
    vm_exit_during_initialization("The size of the object heap + VM data exceeds "
                                  "the maximum representable size");
  }
  assert(total_reserved % alignment == 0,
         "Gen size; total_reserved=" SIZE_FORMAT ", alignment="
         SIZE_FORMAT, total_reserved, alignment);

  *heap_rs = Universe::reserve_heap(total_reserved, alignment);
  return heap_rs->base();
}

void GenCollectedHeap::post_initialize() {
  CollectedHeap::post_initialize();
  ref_processing_init();
  assert((_young_gen->kind() == Generation::DefNew) ||
         (_young_gen->kind() == Generation::ParNew),
    "Wrong youngest generation type");
  DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;

  assert(_old_gen->kind() == Generation::ConcurrentMarkSweep ||
         _old_gen->kind() == Generation::MarkSweepCompact,
    "Wrong generation kind");

  _gen_policy->initialize_size_policy(def_new_gen->eden()->capacity(),
                                      _old_gen->capacity(),
                                      def_new_gen->from()->capacity());
  _gen_policy->initialize_gc_policy_counters();
}

void GenCollectedHeap::ref_processing_init() {
  _young_gen->ref_processor_init();
  _old_gen->ref_processor_init();
}

size_t GenCollectedHeap::capacity() const {
  return _young_gen->capacity() + _old_gen->capacity();
}

size_t GenCollectedHeap::used() const {
  return _young_gen->used() + _old_gen->used();
}

void GenCollectedHeap::save_used_regions() {
  _old_gen->save_used_region();
  _young_gen->save_used_region();
}

size_t GenCollectedHeap::max_capacity() const {
  return _young_gen->max_capacity() + _old_gen->max_capacity();
}

// Update the _full_collections_completed counter
// at the end of a stop-world full GC.
unsigned int GenCollectedHeap::update_full_collections_completed() {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert(_full_collections_completed <= _total_full_collections,
         "Can't complete more collections than were started");
  _full_collections_completed = _total_full_collections;
  ml.notify_all();
  return _full_collections_completed;
}

// Update the _full_collections_completed counter, as appropriate,
// at the end of a concurrent GC cycle. Note the conditional update
// below to allow this method to be called by a concurrent collector
// without synchronizing in any manner with the VM thread (which
// may already have initiated a STW full collection "concurrently").
unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert((_full_collections_completed <= _total_full_collections) &&
         (count <= _total_full_collections),
         "Can't complete more collections than were started");
  if (count > _full_collections_completed) {
    _full_collections_completed = count;
    ml.notify_all();
  }
  return _full_collections_completed;
}


#ifndef PRODUCT
// Override of memory state checking method in CollectedHeap:
// Some collectors (CMS for example) can't have badHeapWordVal written
// in the first two words of an object. (For instance, in the case of
// CMS these words hold state used to synchronize between certain
// (concurrent) GC steps and direct allocating mutators.)
// The skip_header_HeapWords() method below allows us to skip
// over the requisite number of HeapWords. Note that (for
// generational collectors) this means that those many words are
// skipped in each object, irrespective of the generation in which
// that object lives. The resultant loss of precision seems to be
// harmless and the pain of avoiding that imprecision appears somewhat
// higher than we are prepared to pay for such rudimentary debugging
// support.
void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
                                                         size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    // We are asked to check a size in HeapWords,
    // but the memory is mangled in juint words.
    juint* start = (juint*) (addr + skip_header_HeapWords());
    juint* end   = (juint*) (addr + size);
    for (juint* slot = start; slot < end; slot += 1) {
      assert(*slot == badHeapWordVal,
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif

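// Try the young generation first; fall back to the old generation
// unless the caller asked for the first (young) generation only.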
HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                               bool is_tlab,
                                               bool first_only) {
  HeapWord* res = NULL;

  if (_young_gen->should_allocate(size, is_tlab)) {
    res = _young_gen->allocate(size, is_tlab);
    if (res != NULL || first_only) {
      return res;
    }
  }

  if (_old_gen->should_allocate(size, is_tlab)) {
    res = _old_gen->allocate(size, is_tlab);
  }

  return res;
}

HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool* gc_overhead_limit_was_exceeded) {
  return collector_policy()->mem_allocate_work(size,
                                               false /* is_tlab */,
                                               gc_overhead_limit_was_exceeded);
}

bool GenCollectedHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_last_ditch_collection;
}

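// A concurrent full collection is chosen only when CMS is in use and
// the cause is covered by GCLockerInvokesConcurrent or
// ExplicitGCInvokesConcurrent.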
bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  if (!UseConcMarkSweepGC) {
    return false;
  }

  switch (cause) {
    case GCCause::_gc_locker:           return GCLockerInvokesConcurrent;
    case GCCause::_java_lang_system_gc:
    case GCCause::_dcmd_gc_run:         return ExplicitGCInvokesConcurrent;
    default:                            return false;
  }
}

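// Collect a single generation: record timing and invocation statistics,
// run optional verification before and after, set up reference
// discovery, and delegate the actual work to the generation's
// collect() method.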
void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
                                          bool is_tlab, bool run_verification, bool clear_soft_refs,
                                          bool restore_marks_for_biased_locking) {
  // Timer for individual generations. Last argument is false: no CR
  // FIXME: We should try to start the timing earlier to cover more of the GC pause
  GCTraceTime t1(gen->short_name(), PrintGCDetails, false, NULL);
  TraceCollectorStats tcs(gen->counters());
  TraceMemoryManagerStats tmms(gen->kind(), gc_cause());

  size_t prev_used = gen->used();
  gen->stat_record()->invocations++;
  gen->stat_record()->accumulated_time.start();

  // Must be done anew before each collection because
  // a previous collection will do mangling and will
  // change top of some spaces.
  record_gen_tops_before_GC();

  if (PrintGC && Verbose) {
    // I didn't want to change the logging when removing the level concept,
    // but I guess this logging could say young/old or something instead of 0/1.
    uint level;
    if (heap()->is_young_gen(gen)) {
      level = 0;
    } else {
      level = 1;
    }
    gclog_or_tty->print("level=%u invoke=%d size=" SIZE_FORMAT,
                        level,
                        gen->stat_record()->invocations,
                        size * HeapWordSize);
  }

  if (run_verification && VerifyBeforeGC) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyBeforeGC:");
  }
  COMPILER2_PRESENT(DerivedPointerTable::clear());

  if (restore_marks_for_biased_locking) {
    // We perform this mark word preservation work lazily
    // because it's only at this point that we know whether we
    // absolutely have to do it; we want to avoid doing it for
    // scavenge-only collections where it's unnecessary
    BiasedLocking::preserve_marks();
  }

  // Do collection work
  {
    // Note on ref discovery: For what appear to be historical reasons,
    // GCH enables and disables (by enqueueing) ref discovery.
    // In the future this should be moved into the generation's
    // collect method so that ref discovery and enqueueing concerns
    // are local to a generation. The collect method could return
    // an appropriate indication in the case that notification on
    // the ref lock was needed. This will make the treatment of
    // weak refs more uniform (and indeed remove such concerns
    // from GCH). XXX

    HandleMark hm;  // Discard invalid handles created during gc
    save_marks();   // save marks for all gens
    // We want to discover references, but not process them yet.
    // This mode is disabled in process_discovered_references if the
    // generation does some collection work, or in
    // enqueue_discovered_references if the generation returns
    // without doing any work.
    ReferenceProcessor* rp = gen->ref_processor();
    // If the discovery of ("weak") refs in this generation is
    // atomic wrt other collectors in this configuration, we
    // are guaranteed to have empty discovered ref lists.
    if (rp->discovery_is_atomic()) {
      rp->enable_discovery();
      rp->setup_policy(clear_soft_refs);
    } else {
      // collect() below will enable discovery as appropriate
    }
    gen->collect(full, clear_soft_refs, size, is_tlab);
    if (!rp->enqueuing_is_done()) {
      rp->enqueue_discovered_references();
    } else {
      rp->set_enqueuing_is_done(false);
    }
    rp->verify_no_references_recorded();
  }

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

  gen->stat_record()->accumulated_time.stop();

  update_gc_stats(gen, full);

  if (run_verification && VerifyAfterGC) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyAfterGC:");
  }

  if (PrintGCDetails) {
    gclog_or_tty->print(":");
    gen->print_heap_change(prev_used);
  }
}

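// The main stop-the-world collection driver. Depending on 'full' and
// 'max_generation', this collects the young generation, the old
// generation (which subsumes the young), or both, then resizes the
// generations and, for complete collections, the metaspace.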
void GenCollectedHeap::do_collection(bool           full,
                                     bool           clear_all_soft_refs,
                                     size_t         size,
                                     bool           is_tlab,
                                     GenerationType max_generation) {
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(my_thread->is_VM_thread() ||
         my_thread->is_ConcurrentGC_thread(),
         "incorrect thread type capability");
  assert(Heap_lock->is_locked(),
         "the requesting thread should have the Heap_lock");
  guarantee(!is_gc_active(), "collection is not reentrant");

  if (GC_locker::check_active_before_gc()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  GCIdMarkAndRestore gc_id_mark;

  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                          collector_policy()->should_clear_all_soft_refs();

  ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());

  const size_t metadata_prev_used = MetaspaceAux::used_bytes();

  print_heap_before_gc();

  {
    FlagSetting fl(_is_gc_active, true);

    bool complete = full && (max_generation == OldGen);
    const char* gc_cause_prefix = complete ? "Full GC" : "GC";
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL);

    gc_prologue(complete);
    increment_total_collections(complete);

    size_t gch_prev_used = used();
    bool run_verification = total_collections() >= VerifyGCStartAt;

    bool prepared_for_verification = false;
    bool collected_old = false;
    bool old_collects_young = complete && !ScavengeBeforeFullGC;

    if (!old_collects_young && _young_gen->should_collect(full, size, is_tlab)) {
      if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
        prepare_for_verify();
        prepared_for_verification = true;
      }

      assert(!_young_gen->performs_in_place_marking(), "No young generation does in-place marking");
      collect_generation(_young_gen,
                         full,
                         size,
                         is_tlab,
                         run_verification && VerifyGCLevel <= 0,
                         do_clear_all_soft_refs,
                         false);

      if (size > 0 && (!is_tlab || _young_gen->supports_tlab_allocation()) &&
          size * HeapWordSize <= _young_gen->unsafe_max_alloc_nogc()) {
        // Allocation request was met by young GC.
        size = 0;
      }
    }

    bool must_restore_marks_for_biased_locking = false;

    if (max_generation == OldGen && _old_gen->should_collect(full, size, is_tlab)) {
      GCIdMarkAndRestore gc_id_mark;
      if (!complete) {
        // The full_collections increment was missed above.
        increment_total_full_collections();
      }

      pre_full_gc_dump(NULL);    // do any pre full gc dumps

      if (!prepared_for_verification && run_verification &&
          VerifyGCLevel <= 1 && VerifyBeforeGC) {
        prepare_for_verify();
      }

      assert(_old_gen->performs_in_place_marking(), "All old generations do in place marking");
      collect_generation(_old_gen,
                         full,
                         size,
                         is_tlab,
                         run_verification && VerifyGCLevel <= 1,
                         do_clear_all_soft_refs,
                         true);

      must_restore_marks_for_biased_locking = true;
      collected_old = true;
    }

    // Update "complete" boolean wrt what actually transpired --
    // for instance, a promotion failure could have led to
    // a whole heap collection.
    complete = complete || collected_old;

    if (complete) { // We did a full collection
      // FIXME: See comment at pre_full_gc_dump call
      post_full_gc_dump(NULL);   // do any post full gc dumps
    }

    if (PrintGCDetails) {
      print_heap_change(gch_prev_used);

      // Print metaspace info for full GC with PrintGCDetails flag.
      if (complete) {
        MetaspaceAux::print_metaspace_change(metadata_prev_used);
      }
    }

    // Adjust generation sizes.
    if (collected_old) {
      _old_gen->compute_new_size();
    }
    _young_gen->compute_new_size();

    if (complete) {
      // Delete metaspaces for unloaded class loaders and clean up loader_data graph
      ClassLoaderDataGraph::purge();
      MetaspaceAux::verify_metrics();
      // Resize the metaspace capacity after full collections
      MetaspaceGC::compute_new_size();
      update_full_collections_completed();
    }

    // Track memory usage and detect low memory after GC finishes
    MemoryService::track_memory_usage();

    gc_epilogue(complete);

    if (must_restore_marks_for_biased_locking) {
      BiasedLocking::restore_marks();
    }
  }

  print_heap_after_gc();

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif
}

HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  return collector_policy()->satisfy_failed_allocation(size, is_tlab);
}

#ifdef ASSERT
class AssertNonScavengableClosure: public OopClosure {
public:
  virtual void do_oop(oop* p) {
    assert(!GenCollectedHeap::heap()->is_in_partial_collection(*p),
      "Referent should not be scavengable.");  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertNonScavengableClosure assert_is_non_scavengable_closure;
#endif

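// Scan the strong (and optionally weak) root sets. Each root group is
// guarded by a SubTasksDone claim so that, in a parallel scan, exactly
// one worker processes each group.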
void GenCollectedHeap::process_roots(StrongRootsScope* scope,
                                     ScanningOption so,
                                     OopClosure* strong_roots,
                                     OopClosure* weak_roots,
                                     CLDClosure* strong_cld_closure,
                                     CLDClosure* weak_cld_closure,
                                     CodeBlobClosure* code_roots) {
  // General roots.
  assert(Threads::thread_claim_parity() != 0, "must have called prologue code");
  assert(code_roots != NULL, "code root closure should always be set");
  // _n_termination for _process_strong_tasks should be set up upstream,
  // in a method not running in a GC worker. Otherwise the GC worker
  // could be trying to change the termination condition while the task
  // is executing in another GC worker.

  if (!_process_strong_tasks->is_task_claimed(GCH_PS_ClassLoaderDataGraph_oops_do)) {
    ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);
  }

  // Some CLDs contained in the thread frames should be considered strong.
  // Don't process them if they will be processed during the ClassLoaderDataGraph phase.
  CLDClosure* roots_from_clds_p = (strong_cld_closure != weak_cld_closure) ? strong_cld_closure : NULL;
  // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
  CodeBlobClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;

  bool is_par = scope->n_threads() > 1;
  Threads::possibly_parallel_oops_do(is_par, strong_roots, roots_from_clds_p, roots_from_code_p);

  if (!_process_strong_tasks->is_task_claimed(GCH_PS_Universe_oops_do)) {
    Universe::oops_do(strong_roots);
  }
  // Global (strong) JNI handles
  if (!_process_strong_tasks->is_task_claimed(GCH_PS_JNIHandles_oops_do)) {
    JNIHandles::oops_do(strong_roots);
  }

  if (!_process_strong_tasks->is_task_claimed(GCH_PS_ObjectSynchronizer_oops_do)) {
    ObjectSynchronizer::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(GCH_PS_FlatProfiler_oops_do)) {
    FlatProfiler::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(GCH_PS_Management_oops_do)) {
    Management::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(GCH_PS_jvmti_oops_do)) {
    JvmtiExport::oops_do(strong_roots);
  }

  if (!_process_strong_tasks->is_task_claimed(GCH_PS_SystemDictionary_oops_do)) {
    SystemDictionary::roots_oops_do(strong_roots, weak_roots);
  }

  // All threads execute the following. A specific chunk of buckets
  // from the StringTable are the individual tasks.
  if (weak_roots != NULL) {
    if (is_par) {
      StringTable::possibly_parallel_oops_do(weak_roots);
    } else {
      StringTable::oops_do(weak_roots);
    }
  }

  if (!_process_strong_tasks->is_task_claimed(GCH_PS_CodeCache_oops_do)) {
    if (so & SO_ScavengeCodeCache) {
      assert(code_roots != NULL, "must supply closure for code cache");

      // We only visit parts of the CodeCache when scavenging.
      CodeCache::scavenge_root_nmethods_do(code_roots);
    }
    if (so & SO_AllCodeCache) {
      assert(code_roots != NULL, "must supply closure for code cache");

      // CMSCollector uses this to do intermediate-strength collections.
      // We scan the entire code cache, since CodeCache::do_unloading is not called.
      CodeCache::blobs_do(code_roots);
    }
    // Verify that the code cache contents are not subject to
    // movement by a scavenging collection.
    DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
    DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
  }
}

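// Generation-aware root processing: scans the shared root sets, then
// either scans the young generation as a source of roots (when
// collecting the old generation) or scans the old-to-young remembered
// set (when collecting the young generation).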
void GenCollectedHeap::gen_process_roots(StrongRootsScope* scope,
                                         GenerationType type,
                                         bool young_gen_as_roots,
                                         ScanningOption so,
                                         bool only_strong_roots,
                                         OopsInGenClosure* not_older_gens,
                                         OopsInGenClosure* older_gens,
                                         CLDClosure* cld_closure) {
  const bool is_adjust_phase = !only_strong_roots && !young_gen_as_roots;

  bool is_moving_collection = false;
  if (type == YoungGen || is_adjust_phase) {
    // young collections are always moving
    is_moving_collection = true;
  }

  MarkingCodeBlobClosure mark_code_closure(not_older_gens, is_moving_collection);
  OopsInGenClosure* weak_roots = only_strong_roots ? NULL : not_older_gens;
  CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;

  process_roots(scope, so,
                not_older_gens, weak_roots,
                cld_closure, weak_cld_closure,
                &mark_code_closure);

  if (young_gen_as_roots) {
    if (!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
      if (type == OldGen) {
        not_older_gens->set_generation(_young_gen);
        _young_gen->oop_iterate(not_older_gens);
      }
      not_older_gens->reset_generation();
    }
  }
  // When collection is parallel, all threads get to cooperate to do
  // old generation scanning.
  if (type == YoungGen) {
    older_gens->set_generation(_old_gen);
    rem_set()->younger_refs_iterate(_old_gen, older_gens, scope->n_threads());
    older_gens->reset_generation();
  }

  _process_strong_tasks->all_tasks_completed(scope->n_threads());
}


class AlwaysTrueClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) { return true; }
};
static AlwaysTrueClosure always_true;

void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
  JNIHandles::weak_oops_do(&always_true, root_closure);
  _young_gen->ref_processor()->weak_oops_do(root_closure);
  _old_gen->ref_processor()->weak_oops_do(root_closure);
}

#define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
void GenCollectedHeap::                                                 \
oop_since_save_marks_iterate(GenerationType gen,                        \
                             OopClosureType* cur,                       \
                             OopClosureType* older) {                   \
  if (gen == YoungGen) {                                                \
    _young_gen->oop_since_save_marks_iterate##nv_suffix(cur);           \
    _old_gen->oop_since_save_marks_iterate##nv_suffix(older);           \
  } else {                                                              \
    _old_gen->oop_since_save_marks_iterate##nv_suffix(cur);             \
  }                                                                     \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN

bool GenCollectedHeap::no_allocs_since_save_marks() {
  return _young_gen->no_allocs_since_save_marks() &&
         _old_gen->no_allocs_since_save_marks();
}

bool GenCollectedHeap::supports_inline_contig_alloc() const {
  return _young_gen->supports_inline_contig_alloc();
}

HeapWord** GenCollectedHeap::top_addr() const {
  return _young_gen->top_addr();
}

HeapWord** GenCollectedHeap::end_addr() const {
  return _young_gen->end_addr();
}

// public collection interfaces

void GenCollectedHeap::collect(GCCause::Cause cause) {
  if (should_do_concurrent_full_gc(cause)) {
#if INCLUDE_ALL_GCS
    // Mostly concurrent full collection.
    collect_mostly_concurrent(cause);
#else  // INCLUDE_ALL_GCS
    ShouldNotReachHere();
#endif // INCLUDE_ALL_GCS
  } else if (cause == GCCause::_wb_young_gc) {
    // Young collection for the WhiteBox API.
    collect(cause, YoungGen);
  } else {
#ifdef ASSERT
    if (cause == GCCause::_scavenge_alot) {
      // Young collection only.
      collect(cause, YoungGen);
    } else {
      // Stop-the-world full collection.
      collect(cause, OldGen);
    }
#else
    // Stop-the-world full collection.
    collect(cause, OldGen);
#endif
  }
}

void GenCollectedHeap::collect(GCCause::Cause cause, GenerationType max_generation) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  MutexLocker ml(Heap_lock);
  collect_locked(cause, max_generation);
}

void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
  // The caller has the Heap_lock
  assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
  collect_locked(cause, OldGen);
}

// this is the private collection interface
// The Heap_lock is expected to be held on entry.

void GenCollectedHeap::collect_locked(GCCause::Cause cause, GenerationType max_generation) {
  // Read the GC count while holding the Heap_lock
  unsigned int gc_count_before      = total_collections();
  unsigned int full_gc_count_before = total_full_collections();
  {
    MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_generation);
    VMThread::execute(&op);
  }
}

#if INCLUDE_ALL_GCS
bool GenCollectedHeap::create_cms_collector() {

  assert(_old_gen->kind() == Generation::ConcurrentMarkSweep,
         "Unexpected generation kinds");
  // Skip two header words in the block content verification
  NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
  assert(_gen_policy->is_concurrent_mark_sweep_policy(), "Unexpected policy type");
  CMSCollector* collector =
    new CMSCollector((ConcurrentMarkSweepGeneration*)_old_gen,
                     _rem_set,
                     _gen_policy->as_concurrent_mark_sweep_policy());

  if (collector == NULL || !collector->completed_initialization()) {
    if (collector) {
      delete collector;  // Be nice in embedded situation
    }
    vm_shutdown_during_initialization("Could not create CMS collector");
    return false;
  }
  return true;  // success
}

void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");

  MutexLocker ml(Heap_lock);
  // Read the GC counts while holding the Heap_lock
  unsigned int full_gc_count_before = total_full_collections();
  unsigned int gc_count_before      = total_collections();
  {
    MutexUnlocker mu(Heap_lock);
    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
    VMThread::execute(&op);
  }
}
#endif // INCLUDE_ALL_GCS

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_full_collection(clear_all_soft_refs, OldGen);
}

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                          GenerationType last_generation) {
  GenerationType local_last_generation;
  if (!incremental_collection_will_fail(false /* don't consult_young */) &&
      gc_cause() == GCCause::_gc_locker) {
    local_last_generation = YoungGen;
  } else {
    local_last_generation = last_generation;
  }

  do_collection(true,                   // full
                clear_all_soft_refs,    // clear_all_soft_refs
                0,                      // size
                false,                  // is_tlab
                local_last_generation); // last_generation
  // Hack XXX FIX ME !!!
  // A scavenge may not have been attempted, or may have
  // been attempted and failed, because the old gen was too full
  if (local_last_generation == YoungGen && gc_cause() == GCCause::_gc_locker &&
      incremental_collection_will_fail(false /* don't consult_young */)) {
    if (PrintGCDetails) {
      gclog_or_tty->print_cr("GC locker: Trying a full collection "
                             "because scavenge failed");
    }
    // This time allow the old gen to be collected as well
    do_collection(true,                // full
                  clear_all_soft_refs, // clear_all_soft_refs
                  0,                   // size
                  false,               // is_tlab
                  OldGen);             // last_generation
  }
}

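// The young generation is laid out below the old generation in the
// reserved space, so a single address compare suffices; the assert
// cross-checks against the young generation's own reserved region.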
bool GenCollectedHeap::is_in_young(oop p) {
  bool result = ((HeapWord*)p) < _old_gen->reserved().start();
  assert(result == _young_gen->is_in_reserved(p),
         "incorrect test - result=%d, p=" INTPTR_FORMAT, result, p2i((void*)p));
  return result;
}

// Returns "TRUE" iff "p" points into the committed areas of the heap.
bool GenCollectedHeap::is_in(const void* p) const {
  return _young_gen->is_in(p) || _old_gen->is_in(p);
}

#ifdef ASSERT
// Don't implement this by using is_in_young().  This method is used
// in some cases to check that is_in_young() is correct.
bool GenCollectedHeap::is_in_partial_collection(const void* p) {
  assert(is_in_reserved(p) || p == NULL,
    "Does not work if address is non-null and outside of the heap");
  return p < _young_gen->reserved().end() && p != NULL;
}
#endif

void GenCollectedHeap::oop_iterate_no_header(OopClosure* cl) {
  NoHeaderExtendedOopClosure no_header_cl(cl);
  oop_iterate(&no_header_cl);
}

void GenCollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
  _young_gen->oop_iterate(cl);
  _old_gen->oop_iterate(cl);
}

void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
  _young_gen->object_iterate(cl);
  _old_gen->object_iterate(cl);
}

void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
  _young_gen->safe_object_iterate(cl);
  _old_gen->safe_object_iterate(cl);
}

Space* GenCollectedHeap::space_containing(const void* addr) const {
  Space* res = _young_gen->space_containing(addr);
  if (res != NULL) {
    return res;
  }
  res = _old_gen->space_containing(addr);
  assert(res != NULL, "Could not find containing space");
  return res;
}

HeapWord* GenCollectedHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  if (_young_gen->is_in_reserved(addr)) {
    assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
    return _young_gen->block_start(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
  return _old_gen->block_start(addr);
}

size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_size of address outside of heap");
  if (_young_gen->is_in_reserved(addr)) {
    assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
    return _young_gen->block_size(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
  return _old_gen->block_size(addr);
}

bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");
  if (_young_gen->is_in_reserved(addr)) {
    return _young_gen->block_is_obj(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  return _old_gen->block_is_obj(addr);
}

bool GenCollectedHeap::supports_tlab_allocation() const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  return _young_gen->supports_tlab_allocation();
}

size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  if (_young_gen->supports_tlab_allocation()) {
    return _young_gen->tlab_capacity();
  }
  return 0;
}

size_t GenCollectedHeap::tlab_used(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  if (_young_gen->supports_tlab_allocation()) {
    return _young_gen->tlab_used();
  }
  return 0;
}

size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  if (_young_gen->supports_tlab_allocation()) {
    return _young_gen->unsafe_max_tlab_alloc();
  }
  return 0;
}

HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
  bool gc_overhead_limit_was_exceeded;
  return collector_policy()->mem_allocate_work(size /* size */,
                                               true /* is_tlab */,
                                               &gc_overhead_limit_was_exceeded);
}

// Requires "*prev_ptr" to be non-NULL.  Deletes and returns a block of
// minimal size from the list headed by "*prev_ptr".
static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
  bool first = true;
  size_t min_size = 0;   // "first" makes this conceptually infinite.
  ScratchBlock **smallest_ptr, *smallest;
  ScratchBlock  *cur = *prev_ptr;
  while (cur) {
    assert(*prev_ptr == cur, "just checking");
    if (first || cur->num_words < min_size) {
      smallest_ptr = prev_ptr;
      smallest     = cur;
      min_size     = smallest->num_words;
      first        = false;
    }
    prev_ptr = &cur->next;
    cur     =  cur->next;
  }
  smallest      = *smallest_ptr;
  *smallest_ptr = smallest->next;
  return smallest;
}

// Sort the scratch block list headed by "list" into decreasing size order,
// and set "list" to the result.
static void sort_scratch_list(ScratchBlock*& list) {
  ScratchBlock* sorted = NULL;
  ScratchBlock* unsorted = list;
  while (unsorted) {
    ScratchBlock *smallest = removeSmallestScratch(&unsorted);
    smallest->next  = sorted;
    sorted          = smallest;
  }
  list = sorted;
}

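// Gather scratch space contributed by both generations, sorted into
// decreasing size order so the largest blocks are considered first.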
ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                               size_t max_alloc_words) {
  ScratchBlock* res = NULL;
  _young_gen->contribute_scratch(res, requestor, max_alloc_words);
  _old_gen->contribute_scratch(res, requestor, max_alloc_words);
  sort_scratch_list(res);
  return res;
}

void GenCollectedHeap::release_scratch() {
  _young_gen->reset_scratch();
  _old_gen->reset_scratch();
}

class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
  void do_generation(Generation* gen) {
    gen->prepare_for_verify();
  }
};

void GenCollectedHeap::prepare_for_verify() {
  ensure_parsability(false);        // no need to retire TLABs
  GenPrepareForVerifyClosure blk;
  generation_iterate(&blk, false);
}

void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                          bool old_to_young) {
  if (old_to_young) {
    cl->do_generation(_old_gen);
    cl->do_generation(_young_gen);
  } else {
    cl->do_generation(_young_gen);
    cl->do_generation(_old_gen);
  }
}

bool GenCollectedHeap::is_maximal_no_gc() const {
  return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
}

void GenCollectedHeap::save_marks() {
  _young_gen->save_marks();
  _old_gen->save_marks();
}

GenCollectedHeap* GenCollectedHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to GenCollectedHeap::heap()");
  assert(heap->kind() == CollectedHeap::GenCollectedHeap, "Not a GenCollectedHeap");
  return (GenCollectedHeap*)heap;
}

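// Set up compaction points for a full compaction: old generation
// objects compact within the old generation, and surviving young
// generation objects continue into the old generation's space.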
void GenCollectedHeap::prepare_for_compaction() {
  // Start by compacting into same gen.
  CompactPoint cp(_old_gen);
  _old_gen->prepare_for_compaction(&cp);
  _young_gen->prepare_for_compaction(&cp);
}

void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
  if (!silent) {
    gclog_or_tty->print("%s", _old_gen->name());
    gclog_or_tty->print(" ");
  }
  _old_gen->verify();

  if (!silent) {
    gclog_or_tty->print("%s", _young_gen->name());
    gclog_or_tty->print(" ");
  }
  _young_gen->verify();

  if (!silent) {
    gclog_or_tty->print("remset ");
  }
  rem_set()->verify();
}

void GenCollectedHeap::print_on(outputStream* st) const {
  _young_gen->print_on(st);
  _old_gen->print_on(st);
  MetaspaceAux::print_on(st);
}

void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  if (workers() != NULL) {
    workers()->threads_do(tc);
  }
#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::threads_do(tc);
  }
#endif // INCLUDE_ALL_GCS
}

void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    workers()->print_worker_threads_on(st);
    ConcurrentMarkSweepThread::print_all_on(st);
  }
#endif // INCLUDE_ALL_GCS
}

void GenCollectedHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    st->cr();
    CMSCollector::print_on_error(st);
  }
#endif // INCLUDE_ALL_GCS
}

void GenCollectedHeap::print_tracing_info() const {
  if (TraceYoungGenTime) {
    _young_gen->print_summary_info();
  }
  if (TraceOldGenTime) {
    _old_gen->print_summary_info();
  }
}

void GenCollectedHeap::print_heap_change(size_t prev_used) const {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_prologue(_full);
  }
  GenGCPrologueClosure(bool full) : _full(full) {};
};

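// Work done before every collection: retire TLABs so the heap is
// parsable, then run each generation's prologue.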
void GenCollectedHeap::gc_prologue(bool full) {
  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");

  always_do_update_barrier = false;
  // Fill TLAB's and such
  CollectedHeap::accumulate_statistics_all_tlabs();
  ensure_parsability(true);   // retire TLABs

  // Walk generations
  GenGCPrologueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.
};

class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_epilogue(_full);
  }
  GenGCEpilogueClosure(bool full) : _full(full) {};
};

void GenCollectedHeap::gc_epilogue(bool full) {
#if defined(COMPILER2) || INCLUDE_JVMCI
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
  size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
  guarantee(actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
#endif /* COMPILER2 || INCLUDE_JVMCI */

  resize_all_tlabs();

  GenGCEpilogueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.

  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }

  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();

  always_do_update_barrier = UseConcMarkSweepGC;
};

#ifndef PRODUCT
class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
 private:
 public:
  void do_generation(Generation* gen) {
    gen->record_spaces_top();
  }
};

void GenCollectedHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    GenGCSaveTopsBeforeGCClosure blk;
    generation_iterate(&blk, false);  // not old-to-young.
  }
}
#endif  // not PRODUCT

class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->ensure_parsability();
  }
};

void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  GenEnsureParsabilityClosure ep_cl;
  generation_iterate(&ep_cl, false);
}

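// Last-ditch handling of a failed promotion: try to expand the old
// generation and copy the object there. Returns NULL if even the
// expanded old generation cannot accommodate the object.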
oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
                                              oop obj,
                                              size_t obj_size) {
  guarantee(old_gen == _old_gen, "We only get here with an old generation");
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  HeapWord* result = NULL;

  result = old_gen->expand_and_allocate(obj_size, false);

  if (result != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
  }
  return oop(result);
}

class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
  jlong _time;   // in ms
  jlong _now;    // in ms

 public:
  GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }

  jlong time() { return _time; }

  void do_generation(Generation* gen) {
    _time = MIN2(_time, gen->time_of_last_gc(_now));
  }
};

jlong GenCollectedHeap::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  GenTimeOfLastGCClosure tolgc_cl(now);
  // iterate over generations getting the oldest
  // time that a generation was collected
  generation_iterate(&tolgc_cl, false);

  // javaTimeNanos() is guaranteed to be monotonically non-decreasing
  // provided the underlying platform provides such a time source
  // (and it is bug free). So we still have to guard against getting
  // back a time later than 'now'.
  jlong retVal = now - tolgc_cl.time();
  if (retVal < 0) {
    NOT_PRODUCT(warning("time warp: " JLONG_FORMAT, retVal);)
    return 0;
  }
  return retVal;
}