/*
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "gc/shared/workgroup.hpp"
#include "memory/filemap.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/macros.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/vmError.hpp"
#if INCLUDE_ALL_GCS
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/cms/vmCMSOperations.hpp"
#endif // INCLUDE_ALL_GCS

NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)

// The set of potentially parallel tasks in root scanning.
enum GCH_strong_roots_tasks {
  GCH_PS_Universe_oops_do,
  GCH_PS_JNIHandles_oops_do,
  GCH_PS_ObjectSynchronizer_oops_do,
  GCH_PS_FlatProfiler_oops_do,
  GCH_PS_Management_oops_do,
  GCH_PS_SystemDictionary_oops_do,
  GCH_PS_ClassLoaderDataGraph_oops_do,
  GCH_PS_jvmti_oops_do,
  GCH_PS_CodeCache_oops_do,
  GCH_PS_younger_gens,
  // Leave this one last.
  GCH_PS_NumElements
};

GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
  CollectedHeap(),
  _rem_set(NULL),
  _gen_policy(policy),
  _process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
  _full_collections_completed(0)
{
  assert(policy != NULL, "Sanity check");
  if (UseConcMarkSweepGC) {
    _workers = new FlexibleWorkGang("GC Thread", ParallelGCThreads,
                                    /* are_GC_task_threads */ true,
                                    /* are_ConcurrentGC_threads */ false);
    _workers->initialize_workers();
  } else {
    // Serial GC does not use workers.
    _workers = NULL;
  }
}

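// Reserve and partition the heap address space, create the remembered
// set and barrier set, and initialize the young and old generations.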
jint GenCollectedHeap::initialize() {
  CollectedHeap::pre_initialize();

  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  // Allocate space for the heap.

  char* heap_address;
  ReservedSpace heap_rs;

  size_t heap_alignment = collector_policy()->heap_alignment();

  heap_address = allocate(heap_alignment, &heap_rs);

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));

  _rem_set = collector_policy()->create_rem_set(reserved_region());
  set_barrier_set(rem_set()->bs());

  ReservedSpace young_rs = heap_rs.first_part(gen_policy()->young_gen_spec()->max_size(), false, false);
  _young_gen = gen_policy()->young_gen_spec()->init(young_rs, 0, rem_set());
  heap_rs = heap_rs.last_part(gen_policy()->young_gen_spec()->max_size());

  ReservedSpace old_rs = heap_rs.first_part(gen_policy()->old_gen_spec()->max_size(), false, false);
  _old_gen = gen_policy()->old_gen_spec()->init(old_rs, 1, rem_set());
  clear_incremental_collection_failed();

#if INCLUDE_ALL_GCS
  // If we are running CMS, create the collector responsible
  // for collecting the CMS generations.
  if (collector_policy()->is_concurrent_mark_sweep_policy()) {
    bool success = create_cms_collector();
    if (!success) return JNI_ENOMEM;
  }
#endif // INCLUDE_ALL_GCS

  return JNI_OK;
}

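// Reserve a single contiguous address range large enough to back the
// maximum sizes of both generations; the caller carves it into the
// young and old parts.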
char* GenCollectedHeap::allocate(size_t alignment,
                                 ReservedSpace* heap_rs) {
  // Now figure out the total size.
  const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
  assert(alignment % pageSize == 0, "Must be");

  GenerationSpec* young_spec = gen_policy()->young_gen_spec();
  GenerationSpec* old_spec = gen_policy()->old_gen_spec();

  // Check for overflow.
  size_t total_reserved = young_spec->max_size() + old_spec->max_size();
  if (total_reserved < young_spec->max_size()) {
    vm_exit_during_initialization("The size of the object heap + VM data exceeds "
                                  "the maximum representable size");
  }
  assert(total_reserved % alignment == 0,
         err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
                 SIZE_FORMAT, total_reserved, alignment));

  *heap_rs = Universe::reserve_heap(total_reserved, alignment);
  return heap_rs->base();
}

void GenCollectedHeap::post_initialize() {
  CollectedHeap::post_initialize();
  ref_processing_init();
  GenCollectorPolicy *policy = (GenCollectorPolicy *)collector_policy();
  guarantee(policy->is_generation_policy(), "Illegal policy type");
  assert((_young_gen->kind() == Generation::DefNew) ||
         (_young_gen->kind() == Generation::ParNew),
    "Wrong youngest generation type");
  DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;

  assert(_old_gen->kind() == Generation::ConcurrentMarkSweep ||
         _old_gen->kind() == Generation::MarkSweepCompact,
    "Wrong generation kind");

  policy->initialize_size_policy(def_new_gen->eden()->capacity(),
                                 _old_gen->capacity(),
                                 def_new_gen->from()->capacity());
  policy->initialize_gc_policy_counters();
}

void GenCollectedHeap::ref_processing_init() {
  _young_gen->ref_processor_init();
  _old_gen->ref_processor_init();
}

size_t GenCollectedHeap::capacity() const {
  return _young_gen->capacity() + _old_gen->capacity();
}

size_t GenCollectedHeap::used() const {
  return _young_gen->used() + _old_gen->used();
}

// Save the "used_region" of the generation at "level" and of all
// younger generations.
void GenCollectedHeap::save_used_regions(int level) {
  assert(level == 0 || level == 1, "Illegal level parameter");
  if (level == 1) {
    _old_gen->save_used_region();
  }
  _young_gen->save_used_region();
}

size_t GenCollectedHeap::max_capacity() const {
  return _young_gen->max_capacity() + _old_gen->max_capacity();
}

// Update the _full_collections_completed counter
// at the end of a stop-the-world full GC.
unsigned int GenCollectedHeap::update_full_collections_completed() {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert(_full_collections_completed <= _total_full_collections,
         "Can't complete more collections than were started");
  _full_collections_completed = _total_full_collections;
  ml.notify_all();
  return _full_collections_completed;
}

// Update the _full_collections_completed counter, as appropriate,
// at the end of a concurrent GC cycle. Note the conditional update
// below to allow this method to be called by a concurrent collector
// without synchronizing in any manner with the VM thread (which
// may already have initiated a STW full collection "concurrently").
unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert((_full_collections_completed <= _total_full_collections) &&
         (count <= _total_full_collections),
         "Can't complete more collections than were started");
  if (count > _full_collections_completed) {
    _full_collections_completed = count;
    ml.notify_all();
  }
  return _full_collections_completed;
}


#ifndef PRODUCT
// Override of memory state checking method in CollectedHeap:
// Some collectors (CMS for example) can't have badHeapWordVal written
// in the first two words of an object. (For instance, in the case of
// CMS these words hold state used to synchronize between certain
// (concurrent) GC steps and direct allocating mutators.)
// The skip_header_HeapWords() method below allows us to skip
// over the requisite number of HeapWords. Note that (for
// generational collectors) this means that those many words are
// skipped in each object, irrespective of the generation in which
// that object lives. The resultant loss of precision seems to be
// harmless and the pain of avoiding that imprecision appears somewhat
// higher than we are prepared to pay for such rudimentary debugging
// support.
void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
                                                         size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    // We are asked to check a size in HeapWords,
    // but the memory is mangled in juint words.
    juint* start = (juint*) (addr + skip_header_HeapWords());
    juint* end   = (juint*) (addr + size);
    for (juint* slot = start; slot < end; slot += 1) {
      assert(*slot == badHeapWordVal,
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif

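// Try the young generation first; fall back to the old generation
// unless first_only is set. Returns NULL if neither generation can
// satisfy the request.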
HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                               bool is_tlab,
                                               bool first_only) {
  HeapWord* res = NULL;

  if (_young_gen->should_allocate(size, is_tlab)) {
    res = _young_gen->allocate(size, is_tlab);
    if (res != NULL || first_only) {
      return res;
    }
  }

  if (_old_gen->should_allocate(size, is_tlab)) {
    res = _old_gen->allocate(size, is_tlab);
  }

  return res;
}

HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool* gc_overhead_limit_was_exceeded) {
  return collector_policy()->mem_allocate_work(size,
                                               false /* is_tlab */,
                                               gc_overhead_limit_was_exceeded);
}

bool GenCollectedHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_last_ditch_collection;
}

bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  return UseConcMarkSweepGC &&
         ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
          (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
}

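// Collect a single generation: run any requested verification, save
// marks, set up reference discovery, delegate to the generation's
// collect() method, and update timing and usage statistics.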
void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
                                          bool is_tlab, bool run_verification, bool clear_soft_refs,
                                          bool restore_marks_for_biased_locking) {
  // Timer for individual generations. Last argument is false: no CR
  // FIXME: We should try to start the timing earlier to cover more of the GC pause
  // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
  // so we can assume here that the next GC id is what we want.
  GCTraceTime t1(gen->short_name(), PrintGCDetails, false, NULL, GCId::peek());
  TraceCollectorStats tcs(gen->counters());
  TraceMemoryManagerStats tmms(gen->kind(), gc_cause());

  size_t prev_used = gen->used();
  gen->stat_record()->invocations++;
  gen->stat_record()->accumulated_time.start();

  // Must be done anew before each collection because
  // a previous collection will do mangling and will
  // change top of some spaces.
  record_gen_tops_before_GC();

  if (PrintGC && Verbose) {
    gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
                        gen->level(),
                        gen->stat_record()->invocations,
                        size * HeapWordSize);
  }

  if (run_verification && VerifyBeforeGC) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyBeforeGC:");
  }
  COMPILER2_PRESENT(DerivedPointerTable::clear());

  if (restore_marks_for_biased_locking) {
    // We perform this mark word preservation work lazily
    // because it's only at this point that we know whether we
    // absolutely have to do it; we want to avoid doing it for
    // scavenge-only collections where it's unnecessary.
    BiasedLocking::preserve_marks();
  }

  // Do collection work
  {
    // Note on ref discovery: For what appear to be historical reasons,
    // GCH enables and disables (by enqueueing) refs discovery.
    // In the future this should be moved into the generation's
    // collect method so that ref discovery and enqueueing concerns
    // are local to a generation. The collect method could return
    // an appropriate indication in the case that notification on
    // the ref lock was needed. This will make the treatment of
    // weak refs more uniform (and indeed remove such concerns
    // from GCH). XXX

    HandleMark hm;  // Discard invalid handles created during gc
    save_marks();   // save marks for all gens
    // We want to discover references, but not process them yet.
    // This mode is disabled in process_discovered_references if the
    // generation does some collection work, or in
    // enqueue_discovered_references if the generation returns
    // without doing any work.
    ReferenceProcessor* rp = gen->ref_processor();
    // If the discovery of ("weak") refs in this generation is
    // atomic wrt other collectors in this configuration, we
    // are guaranteed to have empty discovered ref lists.
    if (rp->discovery_is_atomic()) {
      rp->enable_discovery();
      rp->setup_policy(clear_soft_refs);
    } else {
      // collect() below will enable discovery as appropriate
    }
    gen->collect(full, clear_soft_refs, size, is_tlab);
    if (!rp->enqueuing_is_done()) {
      rp->enqueue_discovered_references();
    } else {
      rp->set_enqueuing_is_done(false);
    }
    rp->verify_no_references_recorded();
  }

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

  gen->stat_record()->accumulated_time.stop();

  update_gc_stats(gen->level(), full);

  if (run_verification && VerifyAfterGC) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyAfterGC:");
  }

  if (PrintGCDetails) {
    gclog_or_tty->print(":");
    gen->print_heap_change(prev_used);
  }
}

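// Perform a collection of up to max_level (0 = young only, 1 = young
// and old), honoring the soft reference clearing policy and resizing
// the generations afterwards.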
void GenCollectedHeap::do_collection(bool   full,
                                     bool   clear_all_soft_refs,
                                     size_t size,
                                     bool   is_tlab,
                                     int    max_level) {
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(my_thread->is_VM_thread() ||
         my_thread->is_ConcurrentGC_thread(),
         "incorrect thread type capability");
  assert(Heap_lock->is_locked(),
         "the requesting thread should have the Heap_lock");
  guarantee(!is_gc_active(), "collection is not reentrant");

  if (GC_locker::check_active_before_gc()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                          collector_policy()->should_clear_all_soft_refs();

  ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());

  const size_t metadata_prev_used = MetaspaceAux::used_bytes();

  print_heap_before_gc();

  {
    FlagSetting fl(_is_gc_active, true);

    bool complete = full && (max_level == 1 /* old */);
    const char* gc_cause_prefix = complete ? "Full GC" : "GC";
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
    // so we can assume here that the next GC id is what we want.
    GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek());

    gc_prologue(complete);
    increment_total_collections(complete);

    size_t gch_prev_used = used();
    bool run_verification = total_collections() >= VerifyGCStartAt;

    bool prepared_for_verification = false;
    int max_level_collected = 0;
    bool old_collects_young = (max_level == 1) &&
                              full &&
                              _old_gen->full_collects_younger_generations();
    if (!old_collects_young &&
        _young_gen->should_collect(full, size, is_tlab)) {
      if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
        prepare_for_verify();
        prepared_for_verification = true;
      }

      assert(!_young_gen->performs_in_place_marking(), "No young generation does in-place marking");
      collect_generation(_young_gen,
                         full,
                         size,
                         is_tlab,
                         run_verification && VerifyGCLevel <= 0,
                         do_clear_all_soft_refs,
                         false);

      if (size > 0 && (!is_tlab || _young_gen->supports_tlab_allocation()) &&
          size * HeapWordSize <= _young_gen->unsafe_max_alloc_nogc()) {
        // Allocation request was met by young GC.
        size = 0;
      }
    }

    bool must_restore_marks_for_biased_locking = false;

    if (max_level == 1 && _old_gen->should_collect(full, size, is_tlab)) {
      if (!complete) {
        // The full_collections increment was missed above.
        increment_total_full_collections();
      }

      pre_full_gc_dump(NULL);    // do any pre full gc dumps

      if (!prepared_for_verification && run_verification &&
          VerifyGCLevel <= 1 && VerifyBeforeGC) {
        prepare_for_verify();
      }

      assert(_old_gen->performs_in_place_marking(), "All old generations do in-place marking");
      collect_generation(_old_gen,
                         full,
                         size,
                         is_tlab,
                         run_verification && VerifyGCLevel <= 1,
                         do_clear_all_soft_refs,
                         true);

      must_restore_marks_for_biased_locking = true;
      max_level_collected = 1;
    }

    // Update "complete" boolean wrt what actually transpired --
    // for instance, a promotion failure could have led to
    // a whole heap collection.
    complete = complete || (max_level_collected == 1 /* old */);

    if (complete) { // We did a "major" collection
      // FIXME: See comment at pre_full_gc_dump call
      post_full_gc_dump(NULL);   // do any post full gc dumps
    }

    if (PrintGCDetails) {
      print_heap_change(gch_prev_used);

      // Print metaspace info for full GC with PrintGCDetails flag.
      if (complete) {
        MetaspaceAux::print_metaspace_change(metadata_prev_used);
      }
    }

    // Adjust generation sizes.
    if (max_level_collected == 1 /* old */) {
      _old_gen->compute_new_size();
    }
    _young_gen->compute_new_size();

    if (complete) {
      // Delete metaspaces for unloaded class loaders and clean up loader_data graph
      ClassLoaderDataGraph::purge();
      MetaspaceAux::verify_metrics();
      // Resize the metaspace capacity after full collections
      MetaspaceGC::compute_new_size();
      update_full_collections_completed();
    }

    // Track memory usage and detect low memory after GC finishes
    MemoryService::track_memory_usage();

    gc_epilogue(complete);

    if (must_restore_marks_for_biased_locking) {
      BiasedLocking::restore_marks();
    }
  }

  print_heap_after_gc();

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif
}

HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  return collector_policy()->satisfy_failed_allocation(size, is_tlab);
}

void GenCollectedHeap::set_par_threads(uint t) {
  assert(t == 0 || !UseSerialGC, "Cannot have parallel threads");
  CollectedHeap::set_par_threads(t);
}

#ifdef ASSERT
class AssertNonScavengableClosure: public OopClosure {
public:
  virtual void do_oop(oop* p) {
    assert(!GenCollectedHeap::heap()->is_in_partial_collection(*p),
           "Referent should not be scavengable.");
  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertNonScavengableClosure assert_is_non_scavengable_closure;
#endif

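// Scan the strong roots of the VM. When called from multiple GC worker
// threads, each root group is claimed exactly once through
// _process_strong_tasks, so the groups are processed in parallel
// without duplication.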
void GenCollectedHeap::process_roots(StrongRootsScope* scope,
                                     ScanningOption so,
                                     OopClosure* strong_roots,
                                     OopClosure* weak_roots,
                                     CLDClosure* strong_cld_closure,
                                     CLDClosure* weak_cld_closure,
                                     CodeBlobClosure* code_roots) {
  // General roots.
  assert(Threads::thread_claim_parity() != 0, "must have called prologue code");
  assert(code_roots != NULL, "code root closure should always be set");
  // _n_termination for _process_strong_tasks should be set up upstream
  // in a method not running in a GC worker.  Otherwise the GC worker
  // could be trying to change the termination condition while the task
  // is executing in another GC worker.

  if (!_process_strong_tasks->is_task_claimed(GCH_PS_ClassLoaderDataGraph_oops_do)) {
    ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);
  }

  // Some CLDs contained in the thread frames should be considered strong.
  // Don't process them if they will be processed during the ClassLoaderDataGraph phase.
  CLDClosure* roots_from_clds_p = (strong_cld_closure != weak_cld_closure) ? strong_cld_closure : NULL;
  // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
  CodeBlobClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;

  bool is_par = scope->n_threads() > 1;
  Threads::possibly_parallel_oops_do(is_par, strong_roots, roots_from_clds_p, roots_from_code_p);

  if (!_process_strong_tasks->is_task_claimed(GCH_PS_Universe_oops_do)) {
    Universe::oops_do(strong_roots);
  }
  // Global (strong) JNI handles
  if (!_process_strong_tasks->is_task_claimed(GCH_PS_JNIHandles_oops_do)) {
    JNIHandles::oops_do(strong_roots);
  }

  if (!_process_strong_tasks->is_task_claimed(GCH_PS_ObjectSynchronizer_oops_do)) {
    ObjectSynchronizer::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(GCH_PS_FlatProfiler_oops_do)) {
    FlatProfiler::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(GCH_PS_Management_oops_do)) {
    Management::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(GCH_PS_jvmti_oops_do)) {
    JvmtiExport::oops_do(strong_roots);
  }

  if (!_process_strong_tasks->is_task_claimed(GCH_PS_SystemDictionary_oops_do)) {
    SystemDictionary::roots_oops_do(strong_roots, weak_roots);
  }

  // All threads execute the following. A specific chunk of buckets
  // from the StringTable are the individual tasks.
  if (weak_roots != NULL) {
    if (is_par) {
      StringTable::possibly_parallel_oops_do(weak_roots);
    } else {
      StringTable::oops_do(weak_roots);
    }
  }

  if (!_process_strong_tasks->is_task_claimed(GCH_PS_CodeCache_oops_do)) {
    if (so & SO_ScavengeCodeCache) {
      assert(code_roots != NULL, "must supply closure for code cache");

      // We only visit parts of the CodeCache when scavenging.
      CodeCache::scavenge_root_nmethods_do(code_roots);
    }
    if (so & SO_AllCodeCache) {
      assert(code_roots != NULL, "must supply closure for code cache");

      // CMSCollector uses this to do intermediate-strength collections.
      // We scan the entire code cache, since CodeCache::do_unloading is not called.
      CodeCache::blobs_do(code_roots);
    }
    // Verify that the code cache contents are not subject to
    // movement by a scavenging collection.
    DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
    DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
  }
}

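// Process roots on behalf of a collection of the generation at the
// given level, adding younger generations and the remembered set as
// root sources where appropriate.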
void GenCollectedHeap::gen_process_roots(StrongRootsScope* scope,
                                         int level,
                                         bool younger_gens_as_roots,
                                         ScanningOption so,
                                         bool only_strong_roots,
                                         OopsInGenClosure* not_older_gens,
                                         OopsInGenClosure* older_gens,
                                         CLDClosure* cld_closure) {
  const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots;

  bool is_moving_collection = false;
  if (level == 0 || is_adjust_phase) {
    // young collections are always moving
    is_moving_collection = true;
  }

  MarkingCodeBlobClosure mark_code_closure(not_older_gens, is_moving_collection);
  OopsInGenClosure* weak_roots = only_strong_roots ? NULL : not_older_gens;
  CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;

  process_roots(scope, so,
                not_older_gens, weak_roots,
                cld_closure, weak_cld_closure,
                &mark_code_closure);

  if (younger_gens_as_roots) {
    if (!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
      if (level == 1) {
        not_older_gens->set_generation(_young_gen);
        _young_gen->oop_iterate(not_older_gens);
      }
      not_older_gens->reset_generation();
    }
  }
  // When collection is parallel, all threads get to cooperate to do
  // older-gen scanning.
  if (level == 0) {
    older_gens->set_generation(_old_gen);
    rem_set()->younger_refs_iterate(_old_gen, older_gens);
    older_gens->reset_generation();
  }

  _process_strong_tasks->all_tasks_completed(scope->n_threads());
}


class AlwaysTrueClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) { return true; }
};
static AlwaysTrueClosure always_true;

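// Apply root_closure to the weak roots: the global weak JNI handles
// and the discovered reference lists held by each generation's
// reference processor.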
void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
  JNIHandles::weak_oops_do(&always_true, root_closure);
  _young_gen->ref_processor()->weak_oops_do(root_closure);
  _old_gen->ref_processor()->weak_oops_do(root_closure);
}

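// For each (OopClosureType, nv_suffix) closure pair, define an
// oop_since_save_marks_iterate overload that walks objects allocated
// since the last save_marks(): for a young collection (level 0), "cur"
// is applied in the young generation and "older" in the old generation;
// otherwise only the old generation is walked, with "cur".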
#define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
void GenCollectedHeap::                                                 \
oop_since_save_marks_iterate(int level,                                 \
                             OopClosureType* cur,                       \
                             OopClosureType* older) {                   \
  if (level == 0) {                                                     \
    _young_gen->oop_since_save_marks_iterate##nv_suffix(cur);           \
    _old_gen->oop_since_save_marks_iterate##nv_suffix(older);           \
  } else {                                                              \
    _old_gen->oop_since_save_marks_iterate##nv_suffix(cur);             \
  }                                                                     \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN

bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
  if (level == 0 && !_young_gen->no_allocs_since_save_marks()) {
    return false;
  }
  return _old_gen->no_allocs_since_save_marks();
}

bool GenCollectedHeap::supports_inline_contig_alloc() const {
  return _young_gen->supports_inline_contig_alloc();
}

HeapWord** GenCollectedHeap::top_addr() const {
  return _young_gen->top_addr();
}

HeapWord** GenCollectedHeap::end_addr() const {
  return _young_gen->end_addr();
}

// public collection interfaces

void GenCollectedHeap::collect(GCCause::Cause cause) {
  if (should_do_concurrent_full_gc(cause)) {
#if INCLUDE_ALL_GCS
    // mostly concurrent full collection
    collect_mostly_concurrent(cause);
#else  // INCLUDE_ALL_GCS
    ShouldNotReachHere();
#endif // INCLUDE_ALL_GCS
  } else if (cause == GCCause::_wb_young_gc) {
    // minor collection for WhiteBox API
    collect(cause, 0 /* young */);
  } else {
#ifdef ASSERT
    if (cause == GCCause::_scavenge_alot) {
      // minor collection only
      collect(cause, 0 /* young */);
    } else {
      // Stop-the-world full collection
      collect(cause, 1 /* old */);
    }
#else
    // Stop-the-world full collection
    collect(cause, 1 /* old */);
#endif
  }
}

void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  MutexLocker ml(Heap_lock);
  collect_locked(cause, max_level);
}

void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
  // The caller has the Heap_lock
  assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
  collect_locked(cause, 1 /* old */);
}

// This is the private collection interface.
// The Heap_lock is expected to be held on entry.

void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
  // Read the GC count while holding the Heap_lock
  unsigned int gc_count_before      = total_collections();
  unsigned int full_gc_count_before = total_full_collections();
  {
    MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_level);
    VMThread::execute(&op);
  }
}

#if INCLUDE_ALL_GCS
bool GenCollectedHeap::create_cms_collector() {
  assert(_old_gen->kind() == Generation::ConcurrentMarkSweep,
         "Unexpected generation kinds");
  // Skip two header words in the block content verification
  NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
  CMSCollector* collector = new CMSCollector(
    (ConcurrentMarkSweepGeneration*)_old_gen,
    _rem_set->as_CardTableRS(),
    (ConcurrentMarkSweepPolicy*) collector_policy());

  if (collector == NULL || !collector->completed_initialization()) {
    if (collector) {
      delete collector;  // Be nice in embedded situation
    }
    vm_shutdown_during_initialization("Could not create CMS collector");
    return false;
  }
  return true;  // success
}

void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");

  MutexLocker ml(Heap_lock);
  // Read the GC counts while holding the Heap_lock
  unsigned int full_gc_count_before = total_full_collections();
  unsigned int gc_count_before      = total_collections();
  {
    MutexUnlocker mu(Heap_lock);
    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
    VMThread::execute(&op);
  }
}
#endif // INCLUDE_ALL_GCS

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_full_collection(clear_all_soft_refs, 1 /* old */);
}

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                          int max_level) {
  int local_max_level;
  if (!incremental_collection_will_fail(false /* don't consult_young */) &&
      gc_cause() == GCCause::_gc_locker) {
    local_max_level = 0;
  } else {
    local_max_level = max_level;
  }

  do_collection(true                 /* full */,
                clear_all_soft_refs  /* clear_all_soft_refs */,
                0                    /* size */,
                false                /* is_tlab */,
                local_max_level      /* max_level */);
  // Hack XXX FIX ME !!!
  // A scavenge may not have been attempted, or may have
  // been attempted and failed, because the old gen was too full.
  if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
      incremental_collection_will_fail(false /* don't consult_young */)) {
    if (PrintGCDetails) {
      gclog_or_tty->print_cr("GC locker: Trying a full collection "
                             "because scavenge failed");
    }
    // This time allow the old gen to be collected as well
    do_collection(true                 /* full */,
                  clear_all_soft_refs  /* clear_all_soft_refs */,
                  0                    /* size */,
                  false                /* is_tlab */,
                  1  /* old */         /* max_level */);
  }
}

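// The young generation occupies the lower part of the reserved heap,
// so a simple address comparison against the start of the old
// generation's reserved space suffices.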
bool GenCollectedHeap::is_in_young(oop p) {
  bool result = ((HeapWord*)p) < _old_gen->reserved().start();
  assert(result == _young_gen->is_in_reserved(p),
         err_msg("incorrect test - result=%d, p=" INTPTR_FORMAT, result, p2i((void*)p)));
  return result;
}

// Returns "TRUE" iff "p" points into the committed areas of the heap.
bool GenCollectedHeap::is_in(const void* p) const {
  return _young_gen->is_in(p) || _old_gen->is_in(p);
}

#ifdef ASSERT
// Don't implement this by using is_in_young().  This method is used
// in some cases to check that is_in_young() is correct.
bool GenCollectedHeap::is_in_partial_collection(const void* p) {
  assert(is_in_reserved(p) || p == NULL,
    "Does not work if address is non-null and outside of the heap");
  return p < _young_gen->reserved().end() && p != NULL;
}
#endif

void GenCollectedHeap::oop_iterate_no_header(OopClosure* cl) {
  NoHeaderExtendedOopClosure no_header_cl(cl);
  oop_iterate(&no_header_cl);
}

void GenCollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
  _young_gen->oop_iterate(cl);
  _old_gen->oop_iterate(cl);
}

void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
  _young_gen->object_iterate(cl);
  _old_gen->object_iterate(cl);
}

void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
  _young_gen->safe_object_iterate(cl);
  _old_gen->safe_object_iterate(cl);
}

Space* GenCollectedHeap::space_containing(const void* addr) const {
  Space* res = _young_gen->space_containing(addr);
  if (res != NULL) {
    return res;
  }
  res = _old_gen->space_containing(addr);
  assert(res != NULL, "Could not find containing space");
  return res;
}

HeapWord* GenCollectedHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  if (_young_gen->is_in_reserved(addr)) {
    assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
    return _young_gen->block_start(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
  return _old_gen->block_start(addr);
}

size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_size of address outside of heap");
  if (_young_gen->is_in_reserved(addr)) {
    assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
    return _young_gen->block_size(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
  return _old_gen->block_size(addr);
}

bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");
  if (_young_gen->is_in_reserved(addr)) {
    return _young_gen->block_is_obj(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  return _old_gen->block_is_obj(addr);
}

bool GenCollectedHeap::supports_tlab_allocation() const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  return _young_gen->supports_tlab_allocation();
}

size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  if (_young_gen->supports_tlab_allocation()) {
    return _young_gen->tlab_capacity();
  }
  return 0;
}

size_t GenCollectedHeap::tlab_used(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  if (_young_gen->supports_tlab_allocation()) {
    return _young_gen->tlab_used();
  }
  return 0;
}

size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  if (_young_gen->supports_tlab_allocation()) {
    return _young_gen->unsafe_max_tlab_alloc();
  }
  return 0;
}

HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
  bool gc_overhead_limit_was_exceeded;
  return collector_policy()->mem_allocate_work(size /* size */,
                                               true /* is_tlab */,
                                               &gc_overhead_limit_was_exceeded);
}

// Requires "*prev_ptr" to be non-NULL.  Deletes and returns a block of
// minimal size from the list headed by "*prev_ptr".
static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
  bool first = true;
  size_t min_size = 0;   // "first" makes this conceptually infinite.
  ScratchBlock **smallest_ptr, *smallest;
  ScratchBlock  *cur = *prev_ptr;
  while (cur) {
    assert(*prev_ptr == cur, "just checking");
    if (first || cur->num_words < min_size) {
      smallest_ptr = prev_ptr;
      smallest     = cur;
      min_size     = smallest->num_words;
      first        = false;
    }
    prev_ptr = &cur->next;
    cur      = cur->next;
  }
  smallest      = *smallest_ptr;
  *smallest_ptr = smallest->next;
  return smallest;
}

// Sort the scratch block list headed by "list" into decreasing size order,
// and set "list" to the result.
static void sort_scratch_list(ScratchBlock*& list) {
  ScratchBlock* sorted = NULL;
  ScratchBlock* unsorted = list;
  while (unsorted) {
    ScratchBlock *smallest = removeSmallestScratch(&unsorted);
    smallest->next  = sorted;
    sorted          = smallest;
  }
  list = sorted;
}

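// Collect scratch blocks from both generations on behalf of the
// requesting generation, sorted so the largest block comes first.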
ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                               size_t max_alloc_words) {
  ScratchBlock* res = NULL;
  _young_gen->contribute_scratch(res, requestor, max_alloc_words);
  _old_gen->contribute_scratch(res, requestor, max_alloc_words);
  sort_scratch_list(res);
  return res;
}

void GenCollectedHeap::release_scratch() {
  _young_gen->reset_scratch();
  _old_gen->reset_scratch();
}

class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
  void do_generation(Generation* gen) {
    gen->prepare_for_verify();
  }
};

void GenCollectedHeap::prepare_for_verify() {
  ensure_parsability(false);        // no need to retire TLABs
  GenPrepareForVerifyClosure blk;
  generation_iterate(&blk, false);
}

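// Apply "cl" to each generation; "old_to_young" selects the iteration
// order.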
void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                          bool old_to_young) {
  if (old_to_young) {
    cl->do_generation(_old_gen);
    cl->do_generation(_young_gen);
  } else {
    cl->do_generation(_young_gen);
    cl->do_generation(_old_gen);
  }
}

bool GenCollectedHeap::is_maximal_no_gc() const {
  return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
}

void GenCollectedHeap::save_marks() {
  _young_gen->save_marks();
  _old_gen->save_marks();
}

GenCollectedHeap* GenCollectedHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to GenCollectedHeap::heap()");
  assert(heap->kind() == CollectedHeap::GenCollectedHeap, "Not a GenCollectedHeap");
  return (GenCollectedHeap*)heap;
}

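// Compute forwarding addresses for a full compaction. A single compact
// point starts in the old generation and is threaded through both
// generations, so surviving objects are compacted toward the bottom of
// the heap.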
void GenCollectedHeap::prepare_for_compaction() {
  // Start by compacting into same gen.
  CompactPoint cp(_old_gen);
  _old_gen->prepare_for_compaction(&cp);
  _young_gen->prepare_for_compaction(&cp);
}

GCStats* GenCollectedHeap::gc_stats(int level) const {
  if (level == 0) {
    return _young_gen->gc_stats();
  } else {
    return _old_gen->gc_stats();
  }
}

void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
  if (!silent) {
    gclog_or_tty->print("%s", _old_gen->name());
    gclog_or_tty->print(" ");
  }
  _old_gen->verify();

  if (!silent) {
    gclog_or_tty->print("%s", _young_gen->name());
    gclog_or_tty->print(" ");
  }
  _young_gen->verify();

  if (!silent) {
    gclog_or_tty->print("remset ");
  }
  rem_set()->verify();
}

void GenCollectedHeap::print_on(outputStream* st) const {
  _young_gen->print_on(st);
  _old_gen->print_on(st);
  MetaspaceAux::print_on(st);
}

void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  if (workers() != NULL) {
    workers()->threads_do(tc);
  }
#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::threads_do(tc);
  }
#endif // INCLUDE_ALL_GCS
}

void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    workers()->print_worker_threads_on(st);
    ConcurrentMarkSweepThread::print_all_on(st);
  }
#endif // INCLUDE_ALL_GCS
}

void GenCollectedHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    st->cr();
    CMSCollector::print_on_error(st);
  }
#endif // INCLUDE_ALL_GCS
}

void GenCollectedHeap::print_tracing_info() const {
  if (TraceYoungGenTime) {
    _young_gen->print_summary_info();
  }
  if (TraceOldGenTime) {
    _old_gen->print_summary_info();
  }
}

void GenCollectedHeap::print_heap_change(size_t prev_used) const {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_prologue(_full);
  }
  GenGCPrologueClosure(bool full) : _full(full) {}
};

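// Things to do before each collection: retire TLABs so the heap is
// parsable, then notify each generation.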
void GenCollectedHeap::gc_prologue(bool full) {
  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");

  always_do_update_barrier = false;
  // Fill TLAB's and such
  CollectedHeap::accumulate_statistics_all_tlabs();
  ensure_parsability(true);   // retire TLABs

  // Walk generations
  GenGCPrologueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.
}

class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_epilogue(_full);
  }
  GenGCEpilogueClosure(bool full) : _full(full) {}
};

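// Things to do after each collection: resize TLABs, notify each
// generation, clean the chunk pool, and update metaspace counters.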
void GenCollectedHeap::gc_epilogue(bool full) {
#ifdef COMPILER2
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
  size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
  guarantee(actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
#endif /* COMPILER2 */

  resize_all_tlabs();

  GenGCEpilogueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.

  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }

  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();

  always_do_update_barrier = UseConcMarkSweepGC;
}

#ifndef PRODUCT
class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->record_spaces_top();
  }
};

void GenCollectedHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    GenGCSaveTopsBeforeGCClosure blk;
    generation_iterate(&blk, false);  // not old-to-young.
  }
}
#endif  // not PRODUCT

class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->ensure_parsability();
  }
};

void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  GenEnsureParsabilityClosure ep_cl;
  generation_iterate(&ep_cl, false);
}

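// Called when promotion from the young generation fails: try to expand
// the old generation and copy the object there directly, returning
// NULL if even that fails.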
oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
                                              oop obj,
                                              size_t obj_size) {
  guarantee(old_gen->level() == 1, "We only get here with an old generation");
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  HeapWord* result = NULL;

  result = old_gen->expand_and_allocate(obj_size, false);

  if (result != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
  }
  return oop(result);
}

class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
  jlong _time;   // in ms
  jlong _now;    // in ms

 public:
  GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }

  jlong time() { return _time; }

  void do_generation(Generation* gen) {
    _time = MIN2(_time, gen->time_of_last_gc(_now));
  }
};

jlong GenCollectedHeap::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms, but
  // os::javaTimeMillis() does not guarantee monotonicity, so we use
  // os::javaTimeNanos() scaled down to milliseconds instead.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  GenTimeOfLastGCClosure tolgc_cl(now);
  // Iterate over the generations, getting the oldest
  // time that a generation was collected.
  generation_iterate(&tolgc_cl, false);

  // javaTimeNanos() is guaranteed to be monotonically non-decreasing
  // provided the underlying platform provides such a time source
  // (and it is bug free). So we still have to guard against getting
  // back a time later than 'now'.
  jlong retVal = now - tolgc_cl.time();
  if (retVal < 0) {
    NOT_PRODUCT(warning("time warp: " JLONG_FORMAT, retVal);)
    return 0;
  }
  return retVal;
}