/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/filemap.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.inline.hpp"
#include "memory/generationSpec.hpp"
#include "memory/resourceArea.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/vmError.hpp"
#include "utilities/workgroup.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
#endif // INCLUDE_ALL_GCS

GenCollectedHeap* GenCollectedHeap::_gch;
NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)

// The set of potentially parallel tasks in strong root scanning.
enum GCH_process_strong_roots_tasks {
  // We probably want to parallelize both of these internally, but for now...
  GCH_PS_younger_gens,
  // Leave this one last.
  GCH_PS_NumElements
};
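
// Usage sketch (illustrative; it mirrors gen_process_strong_roots() later in
// this file): parallel GC workers use the SubTasksDone object built from
// this enum so that each task is claimed by exactly one thread:
//
//   if (!_gen_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
//     // only the claiming thread scans the younger generations
//   }
//   _gen_process_strong_tasks->all_tasks_completed();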

GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
  SharedHeap(policy),
  _gen_policy(policy),
  _gen_process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
  _full_collections_completed(0)
{
  if (_gen_process_strong_tasks == NULL ||
      !_gen_process_strong_tasks->valid()) {
    vm_exit_during_initialization("Failed necessary allocation.");
  }
  assert(policy != NULL, "Sanity check");
}

jint GenCollectedHeap::initialize() {
  CollectedHeap::pre_initialize();

  int i;
  _n_gens = gen_policy()->number_of_generations();

  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  // The heap must be at least as aligned as generations.
  size_t gen_alignment = Generation::GenGrain;

  _gen_specs = gen_policy()->generations();

  // Make sure the sizes are all aligned.
  for (i = 0; i < _n_gens; i++) {
    _gen_specs[i]->align(gen_alignment);
  }

  // Allocate space for the heap.

  char* heap_address;
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  ReservedSpace heap_rs;

  size_t heap_alignment = collector_policy()->heap_alignment();

  heap_address = allocate(heap_alignment, &total_reserved,
                          &n_covered_regions, &heap_rs);

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap.  (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start((HeapWord*)heap_rs.base());
  size_t actual_heap_size = heap_rs.size();
  _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));

  _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
  set_barrier_set(rem_set()->bs());

  _gch = this;

  for (i = 0; i < _n_gens; i++) {
    ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(), false, false);
    _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set());
    heap_rs = heap_rs.last_part(_gen_specs[i]->max_size());
  }
  clear_incremental_collection_failed();

#if INCLUDE_ALL_GCS
  // If we are running CMS, create the collector responsible
  // for collecting the CMS generations.
  if (collector_policy()->is_concurrent_mark_sweep_policy()) {
    bool success = create_cms_collector();
    if (!success) return JNI_ENOMEM;
  }
#endif // INCLUDE_ALL_GCS

  return JNI_OK;
}


char* GenCollectedHeap::allocate(size_t alignment,
                                 size_t* _total_reserved,
                                 int* _n_covered_regions,
                                 ReservedSpace* heap_rs) {
  const char overflow_msg[] = "The size of the object heap + VM data exceeds "
    "the maximum representable size";

  // Now figure out the total size.
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  const size_t pageSize = UseLargePages ?
      os::large_page_size() : os::vm_page_size();

  assert(alignment % pageSize == 0, "Must be");

  for (int i = 0; i < _n_gens; i++) {
    total_reserved += _gen_specs[i]->max_size();
    if (total_reserved < _gen_specs[i]->max_size()) {
      vm_exit_during_initialization(overflow_msg);
    }
    n_covered_regions += _gen_specs[i]->n_covered_regions();
  }
  assert(total_reserved % alignment == 0,
         err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
                 SIZE_FORMAT, total_reserved, alignment));

  // Needed until the cardtable is fixed to have the right number
  // of covered regions.
  n_covered_regions += 2;

  *_total_reserved = total_reserved;
  *_n_covered_regions = n_covered_regions;

  *heap_rs = Universe::reserve_heap(total_reserved, alignment);
  return heap_rs->base();
}
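
// Note on the overflow check in the sizing loop above (a general unsigned
// arithmetic idiom, sketched here for clarity): a + b wraps exactly when the
// wrapped sum is smaller than either operand, so overflow is detectable
// immediately after the addition:
//
//   size_t sum = a + b;
//   if (sum < b) {
//     // overflow: the true total is not representable in a size_t
//   }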


void GenCollectedHeap::post_initialize() {
  SharedHeap::post_initialize();
  GenCollectorPolicy *policy = (GenCollectorPolicy *)collector_policy();
  guarantee(policy->is_generation_policy(), "Illegal policy type");
  DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
  assert(def_new_gen->kind() == Generation::DefNew ||
         def_new_gen->kind() == Generation::ParNew ||
         def_new_gen->kind() == Generation::ASParNew,
         "Wrong generation kind");

  Generation* old_gen = get_gen(1);
  assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
         old_gen->kind() == Generation::ASConcurrentMarkSweep ||
         old_gen->kind() == Generation::MarkSweepCompact,
         "Wrong generation kind");

  policy->initialize_size_policy(def_new_gen->eden()->capacity(),
                                 old_gen->capacity(),
                                 def_new_gen->from()->capacity());
  policy->initialize_gc_policy_counters();
}

void GenCollectedHeap::ref_processing_init() {
  SharedHeap::ref_processing_init();
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor_init();
  }
}

size_t GenCollectedHeap::capacity() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->capacity();
  }
  return res;
}

size_t GenCollectedHeap::used() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->used();
  }
  return res;
}

// Save the "used_region" for all generations at "level" and lower.
void GenCollectedHeap::save_used_regions(int level) {
  assert(level < _n_gens, "Illegal level parameter");
  for (int i = level; i >= 0; i--) {
    _gens[i]->save_used_region();
  }
}

size_t GenCollectedHeap::max_capacity() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->max_capacity();
  }
  return res;
}

// Update the _full_collections_completed counter
// at the end of a stop-world full GC.
unsigned int GenCollectedHeap::update_full_collections_completed() {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert(_full_collections_completed <= _total_full_collections,
         "Can't complete more collections than were started");
  _full_collections_completed = _total_full_collections;
  ml.notify_all();
  return _full_collections_completed;
}

// Update the _full_collections_completed counter, as appropriate,
// at the end of a concurrent GC cycle. Note the conditional update
// below to allow this method to be called by a concurrent collector
// without synchronizing in any manner with the VM thread (which
// may already have initiated a STW full collection "concurrently").
unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert((_full_collections_completed <= _total_full_collections) &&
         (count <= _total_full_collections),
         "Can't complete more collections than were started");
  if (count > _full_collections_completed) {
    _full_collections_completed = count;
    ml.notify_all();
  }
  return _full_collections_completed;
}
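
// Waiting sketch (an assumption about callers, not code from this file): a
// thread that must wait for a given full collection to finish can block on
// FullGCCount_lock and re-check the counter after each notify_all() above;
// full_collections_completed() stands for the header's accessor of the
// counter updated here:
//
//   MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
//   while (gch->full_collections_completed() < desired_count) {
//     ml.wait(Mutex::_no_safepoint_check_flag);
//   }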


#ifndef PRODUCT
// Override of memory state checking method in CollectedHeap:
// Some collectors (CMS for example) can't have badHeapWordVal written
// in the first two words of an object. (For instance, in the case of
// CMS these words hold state used to synchronize between certain
// (concurrent) GC steps and direct allocating mutators.)
// The skip_header_HeapWords() method below allows us to skip
// over the requisite number of HeapWords. Note that (for
// generational collectors) this means that the same number of words
// is skipped in each object, irrespective of the generation in which
// that object lives. The resultant loss of precision seems to be
// harmless and the pain of avoiding that imprecision appears somewhat
// higher than we are prepared to pay for such rudimentary debugging
// support.
void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
                                                         size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    // We are asked to check a size in HeapWords,
    // but the memory is mangled in juint words.
    juint* start = (juint*) (addr + skip_header_HeapWords());
    juint* end   = (juint*) (addr + size);
    for (juint* slot = start; slot < end; slot += 1) {
      assert(*slot == badHeapWordVal,
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif

HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                               bool is_tlab,
                                               bool first_only) {
  HeapWord* res;
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->should_allocate(size, is_tlab)) {
      res = _gens[i]->allocate(size, is_tlab);
      if (res != NULL) return res;
      else if (first_only) break;
    }
  }
  // Otherwise...
  return NULL;
}

HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool* gc_overhead_limit_was_exceeded) {
  return collector_policy()->mem_allocate_work(size,
                                               false /* is_tlab */,
                                               gc_overhead_limit_was_exceeded);
}

bool GenCollectedHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_last_ditch_collection;
}

bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  return UseConcMarkSweepGC &&
         ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
          (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
}
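
// Decision table (restating the predicate above for quick reference):
//
//   cause                  companion flag               concurrent full GC?
//   _gc_locker             GCLockerInvokesConcurrent    yes, if CMS is in use
//   _java_lang_system_gc   ExplicitGCInvokesConcurrent  yes, if CMS is in use
//   anything else          --                           no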

void GenCollectedHeap::do_collection(bool  full,
                                     bool   clear_all_soft_refs,
                                     size_t size,
                                     bool   is_tlab,
                                     int    max_level) {
  bool prepared_for_verification = false;
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(my_thread->is_VM_thread() ||
         my_thread->is_ConcurrentGC_thread(),
         "incorrect thread type capability");
  assert(Heap_lock->is_locked(),
         "the requesting thread should have the Heap_lock");
  guarantee(!is_gc_active(), "collection is not reentrant");
  assert(max_level < n_gens(), "sanity check");

  if (GC_locker::check_active_before_gc()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                          collector_policy()->should_clear_all_soft_refs();

  ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());

  const size_t metadata_prev_used = MetaspaceAux::used_bytes();

  print_heap_before_gc();

  {
    FlagSetting fl(_is_gc_active, true);

    bool complete = full && (max_level == (n_gens()-1));
    const char* gc_cause_prefix = complete ? "Full GC" : "GC";
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL);

    gc_prologue(complete);
    increment_total_collections(complete);

    size_t gch_prev_used = used();

    int starting_level = 0;
    if (full) {
      // Search for the oldest generation which will collect all younger
      // generations, and start collection loop there.
      for (int i = max_level; i >= 0; i--) {
        if (_gens[i]->full_collects_younger_generations()) {
          starting_level = i;
          break;
        }
      }
    }

    bool must_restore_marks_for_biased_locking = false;

    int max_level_collected = starting_level;
    for (int i = starting_level; i <= max_level; i++) {
      if (_gens[i]->should_collect(full, size, is_tlab)) {
        if (i == n_gens() - 1) {  // a major collection is to happen
          if (!complete) {
            // The full_collections increment was missed above.
            increment_total_full_collections();
          }
          pre_full_gc_dump(NULL);    // do any pre full gc dumps
        }
        // Timer for individual generations. Last argument is false: no CR
        // FIXME: We should try to start the timing earlier to cover more of the GC pause
        GCTraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, NULL);
        TraceCollectorStats tcs(_gens[i]->counters());
        TraceMemoryManagerStats tmms(_gens[i]->kind(), gc_cause());

        size_t prev_used = _gens[i]->used();
        _gens[i]->stat_record()->invocations++;
        _gens[i]->stat_record()->accumulated_time.start();

        // Must be done anew before each collection because
        // a previous collection will do mangling and will
        // change top of some spaces.
        record_gen_tops_before_GC();

        if (PrintGC && Verbose) {
          gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
                     i,
                     _gens[i]->stat_record()->invocations,
                     size*HeapWordSize);
        }

        if (VerifyBeforeGC && i >= VerifyGCLevel &&
            total_collections() >= VerifyGCStartAt) {
          HandleMark hm;  // Discard invalid handles created during verification
          if (!prepared_for_verification) {
            prepare_for_verify();
            prepared_for_verification = true;
          }
          Universe::verify(" VerifyBeforeGC:");
        }
        COMPILER2_PRESENT(DerivedPointerTable::clear());

        if (!must_restore_marks_for_biased_locking &&
            _gens[i]->performs_in_place_marking()) {
          // We perform this mark word preservation work lazily
          // because it's only at this point that we know whether we
          // absolutely have to do it; we want to avoid doing it for
          // scavenge-only collections where it's unnecessary.
          must_restore_marks_for_biased_locking = true;
          BiasedLocking::preserve_marks();
        }

        // Do collection work
        {
          // Note on ref discovery: For what appear to be historical reasons,
          // GCH enables and disables (by enqueueing) refs discovery.
          // In the future this should be moved into the generation's
          // collect method so that ref discovery and enqueueing concerns
          // are local to a generation. The collect method could return
          // an appropriate indication in the case that notification on
          // the ref lock was needed. This will make the treatment of
          // weak refs more uniform (and indeed remove such concerns
          // from GCH). XXX

          HandleMark hm;  // Discard invalid handles created during gc
          save_marks();   // save marks for all gens
          // We want to discover references, but not process them yet.
          // This mode is disabled in process_discovered_references if the
          // generation does some collection work, or in
          // enqueue_discovered_references if the generation returns
          // without doing any work.
          ReferenceProcessor* rp = _gens[i]->ref_processor();
          // If the discovery of ("weak") refs in this generation is
          // atomic wrt other collectors in this configuration, we
          // are guaranteed to have empty discovered ref lists.
          if (rp->discovery_is_atomic()) {
            rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
            rp->setup_policy(do_clear_all_soft_refs);
          } else {
            // collect() below will enable discovery as appropriate
          }
          _gens[i]->collect(full, do_clear_all_soft_refs, size, is_tlab);
          if (!rp->enqueuing_is_done()) {
            rp->enqueue_discovered_references();
          } else {
            rp->set_enqueuing_is_done(false);
          }
          rp->verify_no_references_recorded();
        }
        max_level_collected = i;

        // Determine if allocation request was met.
        if (size > 0) {
          if (!is_tlab || _gens[i]->supports_tlab_allocation()) {
            if (size*HeapWordSize <= _gens[i]->unsafe_max_alloc_nogc()) {
              size = 0;
            }
          }
        }

        COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

        _gens[i]->stat_record()->accumulated_time.stop();

        update_gc_stats(i, full);

        if (VerifyAfterGC && i >= VerifyGCLevel &&
            total_collections() >= VerifyGCStartAt) {
          HandleMark hm;  // Discard invalid handles created during verification
          Universe::verify(" VerifyAfterGC:");
        }

        if (PrintGCDetails) {
          gclog_or_tty->print(":");
          _gens[i]->print_heap_change(prev_used);
        }
      }
    }

    // Update "complete" boolean wrt what actually transpired --
    // for instance, a promotion failure could have led to
    // a whole heap collection.
    complete = complete || (max_level_collected == n_gens() - 1);

    if (complete) { // We did a "major" collection
      // FIXME: See comment at pre_full_gc_dump call
      post_full_gc_dump(NULL);   // do any post full gc dumps
    }

    if (PrintGCDetails) {
      print_heap_change(gch_prev_used);

      // Print metaspace info for full GC with PrintGCDetails flag.
      if (complete) {
        MetaspaceAux::print_metaspace_change(metadata_prev_used);
      }
    }

    for (int j = max_level_collected; j >= 0; j -= 1) {
      // Adjust generation sizes.
      _gens[j]->compute_new_size();
    }

    if (complete) {
      // Delete metaspaces for unloaded class loaders and clean up loader_data graph
      ClassLoaderDataGraph::purge();
      MetaspaceAux::verify_metrics();
      // Resize the metaspace capacity after full collections
      MetaspaceGC::compute_new_size();
      update_full_collections_completed();
    }

    // Track memory usage and detect low memory after GC finishes
    MemoryService::track_memory_usage();

    gc_epilogue(complete);

    if (must_restore_marks_for_biased_locking) {
      BiasedLocking::restore_marks();
    }
  }

  AdaptiveSizePolicy* sp = gen_policy()->size_policy();
  AdaptiveSizePolicyOutput(sp, total_collections());

  print_heap_after_gc();

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif
}

HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  return collector_policy()->satisfy_failed_allocation(size, is_tlab);
}

void GenCollectedHeap::set_par_threads(uint t) {
  SharedHeap::set_par_threads(t);
  _gen_process_strong_tasks->set_n_threads(t);
}

void GenCollectedHeap::
gen_process_strong_roots(int level,
                         bool younger_gens_as_roots,
                         bool activate_scope,
                         SharedHeap::ScanningOption so,
                         OopsInGenClosure* not_older_gens,
                         OopsInGenClosure* older_gens,
                         KlassClosure* klass_closure) {
  // General strong roots.

  SharedHeap::process_strong_roots(activate_scope, so,
                                   not_older_gens, klass_closure);

  if (younger_gens_as_roots) {
    if (!_gen_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
      for (int i = 0; i < level; i++) {
        not_older_gens->set_generation(_gens[i]);
        _gens[i]->oop_iterate(not_older_gens);
      }
      not_older_gens->reset_generation();
    }
  }
  // When collection is parallel, all threads get to cooperate to do
  // older-gen scanning.
  for (int i = level+1; i < _n_gens; i++) {
    older_gens->set_generation(_gens[i]);
    rem_set()->younger_refs_iterate(_gens[i], older_gens);
    older_gens->reset_generation();
  }

  _gen_process_strong_tasks->all_tasks_completed();
}

void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
  SharedHeap::process_weak_roots(root_closure);
  // "Local" "weak" refs
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor()->weak_oops_do(root_closure);
  }
}

#define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
void GenCollectedHeap::                                                 \
oop_since_save_marks_iterate(int level,                                 \
                             OopClosureType* cur,                       \
                             OopClosureType* older) {                   \
  _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur);           \
  for (int i = level+1; i < n_gens(); i++) {                            \
    _gens[i]->oop_since_save_marks_iterate##nv_suffix(older);           \
  }                                                                     \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN
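
// For illustration, one expansion of the macro above (a sketch; the real
// closure types and suffixes are supplied by ALL_SINCE_SAVE_MARKS_CLOSURES,
// and "FooClosure" here is a stand-in): with OopClosureType = FooClosure and
// nv_suffix = _nv, the macro defines
//
//   void GenCollectedHeap::
//   oop_since_save_marks_iterate(int level,
//                                FooClosure* cur,
//                                FooClosure* older) {
//     _gens[level]->oop_since_save_marks_iterate_nv(cur);
//     for (int i = level+1; i < n_gens(); i++) {
//       _gens[i]->oop_since_save_marks_iterate_nv(older);
//     }
//   }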

bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
  for (int i = level; i < _n_gens; i++) {
    if (!_gens[i]->no_allocs_since_save_marks()) return false;
  }
  return true;
}

bool GenCollectedHeap::supports_inline_contig_alloc() const {
  return _gens[0]->supports_inline_contig_alloc();
}

HeapWord** GenCollectedHeap::top_addr() const {
  return _gens[0]->top_addr();
}

HeapWord** GenCollectedHeap::end_addr() const {
  return _gens[0]->end_addr();
}

// public collection interfaces

void GenCollectedHeap::collect(GCCause::Cause cause) {
  if (should_do_concurrent_full_gc(cause)) {
#if INCLUDE_ALL_GCS
    // mostly concurrent full collection
    collect_mostly_concurrent(cause);
#else  // INCLUDE_ALL_GCS
    ShouldNotReachHere();
#endif // INCLUDE_ALL_GCS
  } else {
#ifdef ASSERT
    if (cause == GCCause::_scavenge_alot) {
      // minor collection only
      collect(cause, 0);
    } else {
      // Stop-the-world full collection
      collect(cause, n_gens() - 1);
    }
#else
    // Stop-the-world full collection
    collect(cause, n_gens() - 1);
#endif
  }
}

void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  MutexLocker ml(Heap_lock);
  collect_locked(cause, max_level);
}

void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
  // The caller has the Heap_lock
  assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
  collect_locked(cause, n_gens() - 1);
}

// this is the private collection interface
// The Heap_lock is expected to be held on entry.

void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
  // Read the GC count while holding the Heap_lock
  unsigned int gc_count_before      = total_collections();
  unsigned int full_gc_count_before = total_full_collections();
  {
    MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_level);
    VMThread::execute(&op);
  }
}

#if INCLUDE_ALL_GCS
bool GenCollectedHeap::create_cms_collector() {

  assert(((_gens[1]->kind() == Generation::ConcurrentMarkSweep) ||
         (_gens[1]->kind() == Generation::ASConcurrentMarkSweep)),
         "Unexpected generation kinds");
  // Skip two header words in the block content verification
  NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
  CMSCollector* collector = new CMSCollector(
    (ConcurrentMarkSweepGeneration*)_gens[1],
    _rem_set->as_CardTableRS(),
    (ConcurrentMarkSweepPolicy*) collector_policy());

  if (collector == NULL || !collector->completed_initialization()) {
    if (collector) {
      delete collector;  // Be nice in embedded situation
    }
    vm_shutdown_during_initialization("Could not create CMS collector");
    return false;
  }
  return true;  // success
}

void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");

  MutexLocker ml(Heap_lock);
  // Read the GC counts while holding the Heap_lock
  unsigned int full_gc_count_before = total_full_collections();
  unsigned int gc_count_before      = total_collections();
  {
    MutexUnlocker mu(Heap_lock);
    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
    VMThread::execute(&op);
  }
}
#endif // INCLUDE_ALL_GCS

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_full_collection(clear_all_soft_refs, _n_gens - 1);
}

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                          int max_level) {
  int local_max_level;
  if (!incremental_collection_will_fail(false /* don't consult_young */) &&
      gc_cause() == GCCause::_gc_locker) {
    local_max_level = 0;
  } else {
    local_max_level = max_level;
  }

  do_collection(true                 /* full */,
                clear_all_soft_refs  /* clear_all_soft_refs */,
                0                    /* size */,
                false                /* is_tlab */,
                local_max_level      /* max_level */);
  // Hack XXX FIX ME !!!
  // A scavenge may not have been attempted, or may have
  // been attempted and failed, because the old gen was too full.
  if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
      incremental_collection_will_fail(false /* don't consult_young */)) {
    if (PrintGCDetails) {
      gclog_or_tty->print_cr("GC locker: Trying a full collection "
                             "because scavenge failed");
    }
    // This time allow the old gen to be collected as well
    do_collection(true                 /* full */,
                  clear_all_soft_refs  /* clear_all_soft_refs */,
                  0                    /* size */,
                  false                /* is_tlab */,
                  n_gens() - 1         /* max_level */);
  }
}

bool GenCollectedHeap::is_in_young(oop p) {
  bool result = ((HeapWord*)p) < _gens[_n_gens - 1]->reserved().start();
  assert(result == _gens[0]->is_in_reserved(p),
         err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, (void*)p));
  return result;
}
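
// Layout note (illustrative): the comparison above relies on the young
// generation being reserved at the low end of the heap, directly below the
// old generation, so a single pointer comparison classifies an address:
//
//   [ young generation )[ old generation ... )
//   ^ _reserved.start() ^ _gens[_n_gens - 1]->reserved().start()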

// Returns "TRUE" iff "p" points into the committed areas of the heap.
bool GenCollectedHeap::is_in(const void* p) const {
  #ifndef ASSERT
  guarantee(VerifyBeforeGC      ||
            VerifyDuringGC      ||
            VerifyBeforeExit    ||
            VerifyDuringStartup ||
            PrintAssembly       ||
            tty->count() != 0   ||   // already printing
            VerifyAfterGC       ||
            VMError::fatal_error_in_progress(), "too expensive");
  #endif
  // This might be sped up with a cache of the last generation that
  // answered yes.
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in(p)) return true;
  }
  // Otherwise...
  return false;
}

#ifdef ASSERT
// Don't implement this by using is_in_young().  This method is used
// in some cases to check that is_in_young() is correct.
bool GenCollectedHeap::is_in_partial_collection(const void* p) {
  assert(is_in_reserved(p) || p == NULL,
         "Does not work if address is non-null and outside of the heap");
  return p < _gens[_n_gens - 2]->reserved().end() && p != NULL;
}
#endif

void GenCollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->oop_iterate(cl);
  }
}

void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->object_iterate(cl);
  }
}

void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->safe_object_iterate(cl);
  }
}

Space* GenCollectedHeap::space_containing(const void* addr) const {
  for (int i = 0; i < _n_gens; i++) {
    Space* res = _gens[i]->space_containing(addr);
    if (res != NULL) return res;
  }
  // Otherwise...
  assert(false, "Could not find containing space");
  return NULL;
}


HeapWord* GenCollectedHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
      return _gens[i]->block_start(addr);
    }
  }
  assert(false, "Some generation should contain the address");
  return NULL;
}

size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_size of address outside of heap");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
      return _gens[i]->block_size(addr);
    }
  }
  assert(false, "Some generation should contain the address");
  return 0;
}

bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      return _gens[i]->block_is_obj(addr);
    }
  }
  assert(false, "Some generation should contain the address");
  return false;
}

bool GenCollectedHeap::supports_tlab_allocation() const {
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      return true;
    }
  }
  return false;
}

size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->tlab_capacity();
    }
  }
  return result;
}

size_t GenCollectedHeap::tlab_used(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->tlab_used();
    }
  }
  return result;
}

size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->unsafe_max_tlab_alloc();
    }
  }
  return result;
}

HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
  bool gc_overhead_limit_was_exceeded;
  return collector_policy()->mem_allocate_work(size /* size */,
                                               true /* is_tlab */,
                                               &gc_overhead_limit_was_exceeded);
}

// Requires "*prev_ptr" to be non-NULL.  Deletes and returns a block of
// minimal size from the list headed by "*prev_ptr".
static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
  bool first = true;
  size_t min_size = 0;   // "first" makes this conceptually infinite.
  ScratchBlock **smallest_ptr, *smallest;
  ScratchBlock  *cur = *prev_ptr;
  while (cur) {
    assert(*prev_ptr == cur, "just checking");
    if (first || cur->num_words < min_size) {
      smallest_ptr = prev_ptr;
      smallest     = cur;
      min_size     = smallest->num_words;
      first        = false;
    }
    prev_ptr = &cur->next;
    cur     =  cur->next;
  }
  smallest      = *smallest_ptr;
  *smallest_ptr = smallest->next;
  return smallest;
}

// Sort the scratch block list headed by "list" into decreasing size order,
// and set "list" to the result.
static void sort_scratch_list(ScratchBlock*& list) {
  ScratchBlock* sorted = NULL;
  ScratchBlock* unsorted = list;
  while (unsorted) {
    ScratchBlock *smallest = removeSmallestScratch(&unsorted);
    smallest->next  = sorted;
    sorted          = smallest;
  }
  list = sorted;
}
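
// Worked example (illustrative): for blocks of sizes 3 -> 7 -> 5, the
// successive removeSmallestScratch() calls return 3, then 5, then 7; each
// is pushed onto the front of "sorted", leaving 7 -> 5 -> 3, i.e. the
// decreasing size order promised above.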

ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                               size_t max_alloc_words) {
  ScratchBlock* res = NULL;
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->contribute_scratch(res, requestor, max_alloc_words);
  }
  sort_scratch_list(res);
  return res;
}

void GenCollectedHeap::release_scratch() {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->reset_scratch();
  }
}

class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
  void do_generation(Generation* gen) {
    gen->prepare_for_verify();
  }
};

void GenCollectedHeap::prepare_for_verify() {
  ensure_parsability(false);        // no need to retire TLABs
  GenPrepareForVerifyClosure blk;
  generation_iterate(&blk, false);
}


void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                          bool old_to_young) {
  if (old_to_young) {
    for (int i = _n_gens-1; i >= 0; i--) {
      cl->do_generation(_gens[i]);
    }
  } else {
    for (int i = 0; i < _n_gens; i++) {
      cl->do_generation(_gens[i]);
    }
  }
}

void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->space_iterate(cl, true);
  }
}

bool GenCollectedHeap::is_maximal_no_gc() const {
  for (int i = 0; i < _n_gens; i++) {
    if (!_gens[i]->is_maximal_no_gc()) {
      return false;
    }
  }
  return true;
}

void GenCollectedHeap::save_marks() {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->save_marks();
  }
}

GenCollectedHeap* GenCollectedHeap::heap() {
  assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
  assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
  return _gch;
}
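
// Usage sketch (illustrative): most clients reach this heap through the
// singleton accessor and then ask generation-level questions:
//
//   GenCollectedHeap* gch = GenCollectedHeap::heap();
//   if (gch->is_in_young(obj)) {
//     // obj lives in generation 0
//   }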


void GenCollectedHeap::prepare_for_compaction() {
  guarantee(_n_gens == 2, "Wrong number of generations");
  Generation* old_gen = _gens[1];
  // Start by compacting into same gen.
  CompactPoint cp(old_gen, NULL, NULL);
  old_gen->prepare_for_compaction(&cp);
  Generation* young_gen = _gens[0];
  young_gen->prepare_for_compaction(&cp);
}

GCStats* GenCollectedHeap::gc_stats(int level) const {
  return _gens[level]->gc_stats();
}

void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
  for (int i = _n_gens-1; i >= 0; i--) {
    Generation* g = _gens[i];
    if (!silent) {
      gclog_or_tty->print("%s", g->name());
      gclog_or_tty->print(" ");
    }
    g->verify();
  }
  if (!silent) {
    gclog_or_tty->print("remset ");
  }
  rem_set()->verify();
}

void GenCollectedHeap::print_on(outputStream* st) const {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->print_on(st);
  }
  MetaspaceAux::print_on(st);
}

void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  if (workers() != NULL) {
    workers()->threads_do(tc);
  }
#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::threads_do(tc);
  }
#endif // INCLUDE_ALL_GCS
}

void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
#if INCLUDE_ALL_GCS
  if (UseParNewGC) {
    workers()->print_worker_threads_on(st);
  }
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::print_all_on(st);
  }
#endif // INCLUDE_ALL_GCS
}

void GenCollectedHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    st->cr();
    CMSCollector::print_on_error(st);
  }
#endif // INCLUDE_ALL_GCS
}

void GenCollectedHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    get_gen(0)->print_summary_info();
  }
  if (TraceGen1Time) {
    get_gen(1)->print_summary_info();
  }
}

void GenCollectedHeap::print_heap_change(size_t prev_used) const {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_prologue(_full);
  }
  GenGCPrologueClosure(bool full) : _full(full) {}
};

void GenCollectedHeap::gc_prologue(bool full) {
  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");

  always_do_update_barrier = false;
  // Fill TLAB's and such
  CollectedHeap::accumulate_statistics_all_tlabs();
  ensure_parsability(true);   // retire TLABs

  // Walk generations
  GenGCPrologueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.
}

class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_epilogue(_full);
  }
  GenGCEpilogueClosure(bool full) : _full(full) {}
};

void GenCollectedHeap::gc_epilogue(bool full) {
#ifdef COMPILER2
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
  size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
  guarantee(actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
#endif /* COMPILER2 */

  resize_all_tlabs();

  GenGCEpilogueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.

  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }

  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();

  always_do_update_barrier = UseConcMarkSweepGC;
}

#ifndef PRODUCT
class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->record_spaces_top();
  }
};

void GenCollectedHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    GenGCSaveTopsBeforeGCClosure blk;
    generation_iterate(&blk, false);  // not old-to-young.
  }
}
#endif  // not PRODUCT

class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->ensure_parsability();
  }
};

void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  GenEnsureParsabilityClosure ep_cl;
  generation_iterate(&ep_cl, false);
}

oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
                                              oop obj,
                                              size_t obj_size) {
  guarantee(old_gen->level() == 1, "We only get here with an old generation");
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  HeapWord* result = NULL;

  result = old_gen->expand_and_allocate(obj_size, false);

  if (result != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
  }
  return oop(result);
}
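
// Caller sketch (an assumption about the call sites, which live in the
// generation implementations, not in this file): a young-generation
// collector whose promotion attempt fails can fall back on this routine and
// must be prepared for a NULL result:
//
//   oop copy = gch->handle_failed_promotion(old_gen, obj, obj->size());
//   if (copy == NULL) {
//     // expansion failed too: record a promotion failure for obj
//   }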

class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
  jlong _time;   // in ms
  jlong _now;    // in ms

 public:
  GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }

  jlong time() { return _time; }

  void do_generation(Generation* gen) {
    _time = MIN2(_time, gen->time_of_last_gc(_now));
  }
};

jlong GenCollectedHeap::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  GenTimeOfLastGCClosure tolgc_cl(now);
  // iterate over generations getting the oldest
  // time that a generation was collected
  generation_iterate(&tolgc_cl, false);

  // javaTimeNanos() is guaranteed to be monotonically non-decreasing
  // provided the underlying platform provides such a time source
  // (and it is bug free). So we still have to guard against getting
  // back a time later than 'now'.
  jlong retVal = now - tolgc_cl.time();
  if (retVal < 0) {
    NOT_PRODUCT(warning("time warp: " INT64_FORMAT, retVal);)
    return 0;
  }
  return retVal;
}