/*
 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/compactPermGen.hpp"
#include "memory/filemap.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.inline.hpp"
#include "memory/generationSpec.hpp"
#include "memory/permGen.hpp"
#include "memory/resourceArea.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "runtime/aprofiler.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/vmError.hpp"
#include "utilities/workgroup.hpp"
#ifndef SERIALGC
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
#endif

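// The singleton heap, set in initialize() and returned by
// GenCollectedHeap::heap().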
GenCollectedHeap* GenCollectedHeap::_gch;
NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)

// The set of potentially parallel tasks in strong root scanning.
enum GCH_process_strong_roots_tasks {
  // We probably want to parallelize both of these internally, but for now...
  GCH_PS_younger_gens,
  // Leave this one last.
  GCH_PS_NumElements
};

GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
  SharedHeap(policy),
  _gen_policy(policy),
  _gen_process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
  _full_collections_completed(0)
{
  if (_gen_process_strong_tasks == NULL ||
      !_gen_process_strong_tasks->valid()) {
    vm_exit_during_initialization("Failed necessary allocation.");
  }
  assert(policy != NULL, "Sanity check");
  _preloading_shared_classes = false;
}

jint GenCollectedHeap::initialize() {
  CollectedHeap::pre_initialize();

  int i;
  _n_gens = gen_policy()->number_of_generations();

  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  // The heap must be at least as aligned as generations.
  size_t alignment = Generation::GenGrain;

  _gen_specs = gen_policy()->generations();
  PermanentGenerationSpec *perm_gen_spec =
                                collector_policy()->permanent_generation();

  // Make sure the sizes are all aligned.
  for (i = 0; i < _n_gens; i++) {
    _gen_specs[i]->align(alignment);
  }
  perm_gen_spec->align(alignment);

  // If we are dumping the heap, then allocate a wasted block of address
  // space in order to push the heap to a lower address.  This extra
  // address range allows for other (or larger) libraries to be loaded
  // without them occupying the space required for the shared spaces.

  if (DumpSharedSpaces) {
    uintx reserved = 0;
    uintx block_size = 64*1024*1024;
    while (reserved < SharedDummyBlockSize) {
      char* dummy = os::reserve_memory(block_size);
      reserved += block_size;
    }
  }

  // Allocate space for the heap.

  char* heap_address;
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  ReservedSpace heap_rs(0);

  heap_address = allocate(alignment, perm_gen_spec, &total_reserved,
                          &n_covered_regions, &heap_rs);

  if (UseSharedSpaces) {
    if (!heap_rs.is_reserved() || heap_address != heap_rs.base()) {
      if (heap_rs.is_reserved()) {
        heap_rs.release();
      }
      FileMapInfo* mapinfo = FileMapInfo::current_info();
      mapinfo->fail_continue("Unable to reserve shared region.");
      allocate(alignment, perm_gen_spec, &total_reserved, &n_covered_regions,
               &heap_rs);
    }
  }

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap.  (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start((HeapWord*)heap_rs.base());
  size_t actual_heap_size = heap_rs.size() - perm_gen_spec->misc_data_size()
                                           - perm_gen_spec->misc_code_size();
  _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));

  _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
  set_barrier_set(rem_set()->bs());

  _gch = this;

  for (i = 0; i < _n_gens; i++) {
    ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(),
                                              UseSharedSpaces, UseSharedSpaces);
    _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set());
    heap_rs = heap_rs.last_part(_gen_specs[i]->max_size());
  }
  _perm_gen = perm_gen_spec->init(heap_rs, PermSize, rem_set());

  clear_incremental_collection_failed();

#ifndef SERIALGC
  // If we are running CMS, create the collector responsible
  // for collecting the CMS generations.
  if (collector_policy()->is_concurrent_mark_sweep_policy()) {
    bool success = create_cms_collector();
    if (!success) return JNI_ENOMEM;
  }
#endif // SERIALGC

  return JNI_OK;
}


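// Size the total reservation (all generations, plus the perm gen and its
// misc data/code areas), choose a base address, and reserve the space.
// On success the out-parameters *_total_reserved, *_n_covered_regions and
// *heap_rs are filled in; the return value is the requested base address
// (NULL when any address will do).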
char* GenCollectedHeap::allocate(size_t alignment,
                                 PermanentGenerationSpec* perm_gen_spec,
                                 size_t* _total_reserved,
                                 int* _n_covered_regions,
                                 ReservedSpace* heap_rs) {
  const char overflow_msg[] = "The size of the object heap + VM data exceeds "
    "the maximum representable size";

  // Now figure out the total size.
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  const size_t pageSize = UseLargePages ?
      os::large_page_size() : os::vm_page_size();

  for (int i = 0; i < _n_gens; i++) {
    total_reserved += _gen_specs[i]->max_size();
    if (total_reserved < _gen_specs[i]->max_size()) {
      vm_exit_during_initialization(overflow_msg);
    }
    n_covered_regions += _gen_specs[i]->n_covered_regions();
  }
  assert(total_reserved % pageSize == 0,
         err_msg("Gen size; total_reserved=" SIZE_FORMAT ", pageSize="
                 SIZE_FORMAT, total_reserved, pageSize));
  total_reserved += perm_gen_spec->max_size();
  assert(total_reserved % pageSize == 0,
         err_msg("Perm size; total_reserved=" SIZE_FORMAT ", pageSize="
                 SIZE_FORMAT ", perm gen max=" SIZE_FORMAT, total_reserved,
                 pageSize, perm_gen_spec->max_size()));

  if (total_reserved < perm_gen_spec->max_size()) {
    vm_exit_during_initialization(overflow_msg);
  }
  n_covered_regions += perm_gen_spec->n_covered_regions();

  // Add the size of the data area which shares the same reserved area
  // as the heap, but which is not actually part of the heap.
  size_t s = perm_gen_spec->misc_data_size() + perm_gen_spec->misc_code_size();

  total_reserved += s;
  if (total_reserved < s) {
    vm_exit_during_initialization(overflow_msg);
  }

  if (UseLargePages) {
    assert(total_reserved != 0, "total_reserved cannot be 0");
    total_reserved = round_to(total_reserved, os::large_page_size());
    if (total_reserved < os::large_page_size()) {
      vm_exit_during_initialization(overflow_msg);
    }
  }

  // Calculate the address at which the heap must reside in order for
  // the shared data to be at the required address.

  char* heap_address;
  if (UseSharedSpaces) {

    // Calculate the address of the first word beyond the heap.
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    int lr = CompactingPermGenGen::n_regions - 1;
    size_t capacity = align_size_up(mapinfo->space_capacity(lr), alignment);
    heap_address = mapinfo->region_base(lr) + capacity;

    // Calculate the address of the first word of the heap.
    heap_address -= total_reserved;
  } else {
    heap_address = NULL;  // any address will do.
    if (UseCompressedOops) {
      heap_address = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
      *_total_reserved = total_reserved;
      *_n_covered_regions = n_covered_regions;
      *heap_rs = ReservedHeapSpace(total_reserved, alignment,
                                   UseLargePages, heap_address);

      if (heap_address != NULL && !heap_rs->is_reserved()) {
        // Failed to reserve at specified address - the requested memory
        // region is taken already, for example, by 'java' launcher.
        // Try again to reserve the heap at a higher address.
        heap_address = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
        *heap_rs = ReservedHeapSpace(total_reserved, alignment,
                                     UseLargePages, heap_address);

        if (heap_address != NULL && !heap_rs->is_reserved()) {
          // Failed to reserve at specified address again - give up.
          heap_address = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
          assert(heap_address == NULL, "");
          *heap_rs = ReservedHeapSpace(total_reserved, alignment,
                                       UseLargePages, heap_address);
        }
      }
      return heap_address;
    }
  }

  *_total_reserved = total_reserved;
  *_n_covered_regions = n_covered_regions;
  *heap_rs = ReservedHeapSpace(total_reserved, alignment,
                               UseLargePages, heap_address);

  return heap_address;
}


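// Finish setup once the generations exist: verify that the generation
// kinds match the two-generation policy, then seed the size policy with
// the initial eden, old and survivor capacities.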
void GenCollectedHeap::post_initialize() {
  SharedHeap::post_initialize();
  TwoGenerationCollectorPolicy *policy =
    (TwoGenerationCollectorPolicy *)collector_policy();
  guarantee(policy->is_two_generation_policy(), "Illegal policy type");
  DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
  assert(def_new_gen->kind() == Generation::DefNew ||
         def_new_gen->kind() == Generation::ParNew ||
         def_new_gen->kind() == Generation::ASParNew,
         "Wrong generation kind");

  Generation* old_gen = get_gen(1);
  assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
         old_gen->kind() == Generation::ASConcurrentMarkSweep ||
         old_gen->kind() == Generation::MarkSweepCompact,
    "Wrong generation kind");

  policy->initialize_size_policy(def_new_gen->eden()->capacity(),
                                 old_gen->capacity(),
                                 def_new_gen->from()->capacity());
  policy->initialize_gc_policy_counters();
}

void GenCollectedHeap::ref_processing_init() {
  SharedHeap::ref_processing_init();
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor_init();
  }
}

size_t GenCollectedHeap::capacity() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->capacity();
  }
  return res;
}

size_t GenCollectedHeap::used() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->used();
  }
  return res;
}

// Save the "used_region" for generations at level and lower,
// and, if perm is true, for the perm gen as well.
void GenCollectedHeap::save_used_regions(int level, bool perm) {
  assert(level < _n_gens, "Illegal level parameter");
  for (int i = level; i >= 0; i--) {
    _gens[i]->save_used_region();
  }
  if (perm) {
    perm_gen()->save_used_region();
  }
}

size_t GenCollectedHeap::max_capacity() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->max_capacity();
  }
  return res;
}

// Update the _full_collections_completed counter
// at the end of a stop-world full GC.
unsigned int GenCollectedHeap::update_full_collections_completed() {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert(_full_collections_completed <= _total_full_collections,
         "Can't complete more collections than were started");
  _full_collections_completed = _total_full_collections;
  ml.notify_all();
  return _full_collections_completed;
}

// Update the _full_collections_completed counter, as appropriate,
// at the end of a concurrent GC cycle. Note the conditional update
// below to allow this method to be called by a concurrent collector
// without synchronizing in any manner with the VM thread (which
// may already have initiated a STW full collection "concurrently").
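// A concurrent collector would typically snapshot total_full_collections()
// when its cycle begins and pass that snapshot here when the cycle ends,
// for example (sketch; "gch" stands for the heap instance):
//   unsigned int count = gch->total_full_collections();
//   ... run the concurrent cycle ...
//   gch->update_full_collections_completed(count);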
unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert((_full_collections_completed <= _total_full_collections) &&
         (count <= _total_full_collections),
         "Can't complete more collections than were started");
  if (count > _full_collections_completed) {
    _full_collections_completed = count;
    ml.notify_all();
  }
  return _full_collections_completed;
}


#ifndef PRODUCT
// Override of memory state checking method in CollectedHeap:
// Some collectors (CMS for example) can't have badHeapWordVal written
// in the first two words of an object. (For instance, in the case of
// CMS these words hold state used to synchronize between certain
// (concurrent) GC steps and direct allocating mutators.)
// The skip_header_HeapWords() method below allows us to skip
// over the requisite number of HeapWords. Note that (for
// generational collectors) this means that those many words are
// skipped in each object, irrespective of the generation in which
// that object lives. The resultant loss of precision seems to be
// harmless and the pain of avoiding that imprecision appears somewhat
// higher than we are prepared to pay for such rudimentary debugging
// support.
void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
                                                         size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    // We are asked to check a size in HeapWords,
    // but the memory is mangled in juint words.
    juint* start = (juint*) (addr + skip_header_HeapWords());
    juint* end   = (juint*) (addr + size);
    for (juint* slot = start; slot < end; slot += 1) {
      assert(*slot == badHeapWordVal,
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif

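// Try each generation from youngest to oldest: the first generation that
// is willing to service the request gets an allocation attempt. When
// first_only is set, give up after the first willing generation. Returns
// NULL if the request could not be satisfied without a GC.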
HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                               bool is_tlab,
                                               bool first_only) {
  HeapWord* res;
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->should_allocate(size, is_tlab)) {
      res = _gens[i]->allocate(size, is_tlab);
      if (res != NULL) return res;
      else if (first_only) break;
    }
  }
  // Otherwise...
  return NULL;
}

HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool* gc_overhead_limit_was_exceeded) {
  return collector_policy()->mem_allocate_work(size,
                                               false /* is_tlab */,
                                               gc_overhead_limit_was_exceeded);
}

bool GenCollectedHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_last_ditch_collection;
}

bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  return UseConcMarkSweepGC &&
         ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
          (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
}

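// The main collection driver, run at a safepoint with the Heap_lock held:
// pick the starting level, collect each generation that wants collecting
// (with optional verification before and after), resize the collected
// generations, and run the GC epilogue.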
void GenCollectedHeap::do_collection(bool  full,
                                     bool   clear_all_soft_refs,
                                     size_t size,
                                     bool   is_tlab,
                                     int    max_level) {
  bool prepared_for_verification = false;
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(my_thread->is_VM_thread() ||
         my_thread->is_ConcurrentGC_thread(),
         "incorrect thread type capability");
  assert(Heap_lock->is_locked(),
         "the requesting thread should have the Heap_lock");
  guarantee(!is_gc_active(), "collection is not reentrant");
  assert(max_level < n_gens(), "sanity check");

  if (GC_locker::check_active_before_gc()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                          collector_policy()->should_clear_all_soft_refs();

  ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());

  const size_t perm_prev_used = perm_gen()->used();

  print_heap_before_gc();
  if (Verbose) {
    gclog_or_tty->print_cr("GC Cause: %s", GCCause::to_string(gc_cause()));
  }

  {
    FlagSetting fl(_is_gc_active, true);

    bool complete = full && (max_level == (n_gens()-1));
    const char* gc_cause_str = "GC ";
    if (complete) {
      GCCause::Cause cause = gc_cause();
      if (cause == GCCause::_java_lang_system_gc) {
        gc_cause_str = "Full GC (System) ";
      } else {
        gc_cause_str = "Full GC ";
      }
    }
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t(gc_cause_str, PrintGCDetails, false, gclog_or_tty);

    gc_prologue(complete);
    increment_total_collections(complete);

    size_t gch_prev_used = used();

    int starting_level = 0;
    if (full) {
      // Search for the oldest generation which will collect all younger
      // generations, and start collection loop there.
      for (int i = max_level; i >= 0; i--) {
        if (_gens[i]->full_collects_younger_generations()) {
          starting_level = i;
          break;
        }
      }
    }

    bool must_restore_marks_for_biased_locking = false;

    int max_level_collected = starting_level;
    for (int i = starting_level; i <= max_level; i++) {
      if (_gens[i]->should_collect(full, size, is_tlab)) {
        if (i == n_gens() - 1) {  // a major collection is to happen
          if (!complete) {
            // The full_collections increment was missed above.
            increment_total_full_collections();
          }
          pre_full_gc_dump();    // do any pre full gc dumps
        }
        // Timer for individual generations. Last argument is false: no CR
        TraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, gclog_or_tty);
        TraceCollectorStats tcs(_gens[i]->counters());
        TraceMemoryManagerStats tmms(_gens[i]->kind(), gc_cause());

        size_t prev_used = _gens[i]->used();
        _gens[i]->stat_record()->invocations++;
        _gens[i]->stat_record()->accumulated_time.start();

        // Must be done anew before each collection because
        // a previous collection will do mangling and will
        // change top of some spaces.
        record_gen_tops_before_GC();

        if (PrintGC && Verbose) {
          gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
                     i,
                     _gens[i]->stat_record()->invocations,
                     size*HeapWordSize);
        }

        if (VerifyBeforeGC && i >= VerifyGCLevel &&
            total_collections() >= VerifyGCStartAt) {
          HandleMark hm;  // Discard invalid handles created during verification
          if (!prepared_for_verification) {
            prepare_for_verify();
            prepared_for_verification = true;
          }
          gclog_or_tty->print(" VerifyBeforeGC:");
          Universe::verify(true);
        }
        COMPILER2_PRESENT(DerivedPointerTable::clear());

        if (!must_restore_marks_for_biased_locking &&
            _gens[i]->performs_in_place_marking()) {
          // We perform this mark word preservation work lazily
          // because it's only at this point that we know whether we
          // absolutely have to do it; we want to avoid doing it for
          // scavenge-only collections where it's unnecessary
          must_restore_marks_for_biased_locking = true;
          BiasedLocking::preserve_marks();
        }

        // Do collection work
        {
          // Note on ref discovery: For what appear to be historical reasons,
          // GCH enables and disables (by enqueueing) refs discovery.
          // In the future this should be moved into the generation's
          // collect method so that ref discovery and enqueueing concerns
          // are local to a generation. The collect method could return
          // an appropriate indication in the case that notification on
          // the ref lock was needed. This will make the treatment of
          // weak refs more uniform (and indeed remove such concerns
          // from GCH). XXX

          HandleMark hm;  // Discard invalid handles created during gc
          save_marks();   // save marks for all gens
          // We want to discover references, but not process them yet.
          // This mode is disabled in process_discovered_references if the
          // generation does some collection work, or in
          // enqueue_discovered_references if the generation returns
          // without doing any work.
          ReferenceProcessor* rp = _gens[i]->ref_processor();
          // If the discovery of ("weak") refs in this generation is
          // atomic wrt other collectors in this configuration, we
          // are guaranteed to have empty discovered ref lists.
          if (rp->discovery_is_atomic()) {
            rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
            rp->setup_policy(do_clear_all_soft_refs);
          } else {
            // collect() below will enable discovery as appropriate
          }
          _gens[i]->collect(full, do_clear_all_soft_refs, size, is_tlab);
          if (!rp->enqueuing_is_done()) {
            rp->enqueue_discovered_references();
          } else {
            rp->set_enqueuing_is_done(false);
          }
          rp->verify_no_references_recorded();
        }
        max_level_collected = i;

        // Determine if allocation request was met.
        if (size > 0) {
          if (!is_tlab || _gens[i]->supports_tlab_allocation()) {
            if (size*HeapWordSize <= _gens[i]->unsafe_max_alloc_nogc()) {
              size = 0;
            }
          }
        }

        COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

        _gens[i]->stat_record()->accumulated_time.stop();

        update_gc_stats(i, full);

        if (VerifyAfterGC && i >= VerifyGCLevel &&
            total_collections() >= VerifyGCStartAt) {
          HandleMark hm;  // Discard invalid handles created during verification
          gclog_or_tty->print(" VerifyAfterGC:");
          Universe::verify(false);
        }

        if (PrintGCDetails) {
          gclog_or_tty->print(":");
          _gens[i]->print_heap_change(prev_used);
        }
      }
    }

    // Update "complete" boolean wrt what actually transpired --
    // for instance, a promotion failure could have led to
    // a whole heap collection.
    complete = complete || (max_level_collected == n_gens() - 1);

    if (complete) { // We did a "major" collection
      post_full_gc_dump();   // do any post full gc dumps
    }

    if (PrintGCDetails) {
      print_heap_change(gch_prev_used);

      // Print perm gen info for full GC with PrintGCDetails flag.
      if (complete) {
        print_perm_heap_change(perm_prev_used);
      }
    }

    for (int j = max_level_collected; j >= 0; j -= 1) {
      // Adjust generation sizes.
      _gens[j]->compute_new_size();
    }

    if (complete) {
      // Ask the permanent generation to adjust size for full collections
      perm()->compute_new_size();
      update_full_collections_completed();
    }

    // Track memory usage and detect low memory after GC finishes
    MemoryService::track_memory_usage();

    gc_epilogue(complete);

    if (must_restore_marks_for_biased_locking) {
      BiasedLocking::restore_marks();
    }
  }

  AdaptiveSizePolicy* sp = gen_policy()->size_policy();
  AdaptiveSizePolicyOutput(sp, total_collections());

  print_heap_after_gc();

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) {
    tty->print_cr("Stopping after GC #%d", ExitAfterGCNum);
    vm_exit(-1);
  }
}

HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  return collector_policy()->satisfy_failed_allocation(size, is_tlab);
}

void GenCollectedHeap::set_par_threads(uint t) {
  SharedHeap::set_par_threads(t);
  _gen_process_strong_tasks->set_n_threads(t);
}

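// Process strong roots on behalf of a collection of the generations at
// `level` and below. The shared root set is scanned once; if
// younger_gens_as_roots, one thread claims the task of scanning the
// younger generations as roots, and all threads then cooperate to scan
// the remembered sets of the older generations.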
void GenCollectedHeap::
gen_process_strong_roots(int level,
                         bool younger_gens_as_roots,
                         bool activate_scope,
                         bool collecting_perm_gen,
                         SharedHeap::ScanningOption so,
                         OopsInGenClosure* not_older_gens,
                         bool do_code_roots,
                         OopsInGenClosure* older_gens) {
  // General strong roots.

  if (!do_code_roots) {
    SharedHeap::process_strong_roots(activate_scope, collecting_perm_gen, so,
                                     not_older_gens, NULL, older_gens);
  } else {
    bool do_code_marking = (activate_scope || nmethod::oops_do_marking_is_active());
    CodeBlobToOopClosure code_roots(not_older_gens, /*do_marking=*/ do_code_marking);
    SharedHeap::process_strong_roots(activate_scope, collecting_perm_gen, so,
                                     not_older_gens, &code_roots, older_gens);
  }

  if (younger_gens_as_roots) {
    if (!_gen_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
      for (int i = 0; i < level; i++) {
        not_older_gens->set_generation(_gens[i]);
        _gens[i]->oop_iterate(not_older_gens);
      }
      not_older_gens->reset_generation();
    }
  }
  // When collection is parallel, all threads get to cooperate to do
  // older-gen scanning.
  for (int i = level+1; i < _n_gens; i++) {
    older_gens->set_generation(_gens[i]);
    rem_set()->younger_refs_iterate(_gens[i], older_gens);
    older_gens->reset_generation();
  }

  _gen_process_strong_tasks->all_tasks_completed();
}

void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure,
                                              CodeBlobClosure* code_roots,
                                              OopClosure* non_root_closure) {
  SharedHeap::process_weak_roots(root_closure, code_roots, non_root_closure);
  // "Local" "weak" refs
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor()->weak_oops_do(root_closure);
  }
}

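// Expands to one oop_since_save_marks_iterate definition per closure
// type: apply `cur` to oops in the collected level and `older` to oops
// in the higher levels and the perm gen, visiting only objects allocated
// since the last save_marks().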
#define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
void GenCollectedHeap::                                                 \
oop_since_save_marks_iterate(int level,                                 \
                             OopClosureType* cur,                       \
                             OopClosureType* older) {                   \
  _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur);           \
  for (int i = level+1; i < n_gens(); i++) {                            \
    _gens[i]->oop_since_save_marks_iterate##nv_suffix(older);           \
  }                                                                     \
  perm_gen()->oop_since_save_marks_iterate##nv_suffix(older);           \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN

bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
  for (int i = level; i < _n_gens; i++) {
    if (!_gens[i]->no_allocs_since_save_marks()) return false;
  }
  return perm_gen()->no_allocs_since_save_marks();
}

bool GenCollectedHeap::supports_inline_contig_alloc() const {
  return _gens[0]->supports_inline_contig_alloc();
}

HeapWord** GenCollectedHeap::top_addr() const {
  return _gens[0]->top_addr();
}

HeapWord** GenCollectedHeap::end_addr() const {
  return _gens[0]->end_addr();
}

size_t GenCollectedHeap::unsafe_max_alloc() {
  return _gens[0]->unsafe_max_alloc_nogc();
}

// public collection interfaces

void GenCollectedHeap::collect(GCCause::Cause cause) {
  if (should_do_concurrent_full_gc(cause)) {
#ifndef SERIALGC
    // mostly concurrent full collection
    collect_mostly_concurrent(cause);
#else  // SERIALGC
    ShouldNotReachHere();
#endif // SERIALGC
  } else {
#ifdef ASSERT
    if (cause == GCCause::_scavenge_alot) {
      // minor collection only
      collect(cause, 0);
    } else {
      // Stop-the-world full collection
      collect(cause, n_gens() - 1);
    }
#else
    // Stop-the-world full collection
    collect(cause, n_gens() - 1);
#endif
  }
}

void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  MutexLocker ml(Heap_lock);
  collect_locked(cause, max_level);
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void GenCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump: {
      HandleMark hm;
      do_full_collection(false,         // don't clear all soft refs
                         n_gens() - 1);
      break;
    }
    default: // XXX FIX ME
      ShouldNotReachHere(); // Unexpected use of this function
  }
}

void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
  // The caller has the Heap_lock
  assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
  collect_locked(cause, n_gens() - 1);
}

// this is the private collection interface
// The Heap_lock is expected to be held on entry.

void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
  if (_preloading_shared_classes) {
    report_out_of_shared_space(SharedPermGen);
  }
  // Read the GC count while holding the Heap_lock
  unsigned int gc_count_before      = total_collections();
  unsigned int full_gc_count_before = total_full_collections();
  {
    MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_level);
    VMThread::execute(&op);
  }
}

#ifndef SERIALGC
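// Create the CMS collector over the old and perm generations. Returns
// false (after reporting the failure) if the collector could not
// complete its initialization.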
bool GenCollectedHeap::create_cms_collector() {

  assert(((_gens[1]->kind() == Generation::ConcurrentMarkSweep) ||
         (_gens[1]->kind() == Generation::ASConcurrentMarkSweep)) &&
         _perm_gen->as_gen()->kind() == Generation::ConcurrentMarkSweep,
         "Unexpected generation kinds");
  // Skip two header words in the block content verification
  NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
  CMSCollector* collector = new CMSCollector(
    (ConcurrentMarkSweepGeneration*)_gens[1],
    (ConcurrentMarkSweepGeneration*)_perm_gen->as_gen(),
    _rem_set->as_CardTableRS(),
    (ConcurrentMarkSweepPolicy*) collector_policy());

  if (collector == NULL || !collector->completed_initialization()) {
    if (collector) {
      delete collector;  // Be nice in embedded situation
    }
    vm_shutdown_during_initialization("Could not create CMS collector");
    return false;
  }
  return true;  // success
}

void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");

  MutexLocker ml(Heap_lock);
  // Read the GC counts while holding the Heap_lock
  unsigned int full_gc_count_before = total_full_collections();
  unsigned int gc_count_before      = total_collections();
  {
    MutexUnlocker mu(Heap_lock);
    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
    VMThread::execute(&op);
  }
}
#endif // SERIALGC


void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                          int max_level) {
  int local_max_level;
  if (!incremental_collection_will_fail(false /* don't consult_young */) &&
      gc_cause() == GCCause::_gc_locker) {
    local_max_level = 0;
  } else {
    local_max_level = max_level;
  }

  do_collection(true                 /* full */,
                clear_all_soft_refs  /* clear_all_soft_refs */,
                0                    /* size */,
                false                /* is_tlab */,
                local_max_level      /* max_level */);
  // Hack XXX FIX ME !!!
  // A scavenge may not have been attempted, or may have
  // been attempted and failed, because the old gen was too full
  if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
      incremental_collection_will_fail(false /* don't consult_young */)) {
    if (PrintGCDetails) {
      gclog_or_tty->print_cr("GC locker: Trying a full collection "
                             "because scavenge failed");
    }
    // This time allow the old gen to be collected as well
    do_collection(true                 /* full */,
                  clear_all_soft_refs  /* clear_all_soft_refs */,
                  0                    /* size */,
                  false                /* is_tlab */,
                  n_gens() - 1         /* max_level */);
  }
}

bool GenCollectedHeap::is_in_young(oop p) {
  bool result = ((HeapWord*)p) < _gens[_n_gens - 1]->reserved().start();
  assert(result == _gens[0]->is_in_reserved(p),
         err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, (void*)p));
  return result;
}

// Returns "TRUE" iff "p" points into the committed areas of the heap.
bool GenCollectedHeap::is_in(const void* p) const {
  #ifndef ASSERT
  guarantee(VerifyBeforeGC   ||
            VerifyDuringGC   ||
            VerifyBeforeExit ||
            PrintAssembly    ||
            tty->count() != 0 ||   // already printing
            VerifyAfterGC    ||
            VMError::fatal_error_in_progress(), "too expensive");

  #endif
  // This might be sped up with a cache of the last generation that
  // answered yes.
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in(p)) return true;
  }
  if (_perm_gen->as_gen()->is_in(p)) return true;
  // Otherwise...
  return false;
}

#ifdef ASSERT
// Don't implement this by using is_in_young().  This method is used
// in some cases to check that is_in_young() is correct.
bool GenCollectedHeap::is_in_partial_collection(const void* p) {
  assert(is_in_reserved(p) || p == NULL,
    "Does not work if address is non-null and outside of the heap");
  // The order of the generations is young (low addr), old, perm (high addr)
  return p < _gens[_n_gens - 2]->reserved().end() && p != NULL;
}
#endif

void GenCollectedHeap::oop_iterate(OopClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->oop_iterate(cl);
  }
}

void GenCollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->oop_iterate(mr, cl);
  }
}

void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->object_iterate(cl);
  }
  perm_gen()->object_iterate(cl);
}

void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->safe_object_iterate(cl);
  }
  perm_gen()->safe_object_iterate(cl);
}

void GenCollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->object_iterate_since_last_GC(cl);
  }
}

Space* GenCollectedHeap::space_containing(const void* addr) const {
  for (int i = 0; i < _n_gens; i++) {
    Space* res = _gens[i]->space_containing(addr);
    if (res != NULL) return res;
  }
  Space* res = perm_gen()->space_containing(addr);
  if (res != NULL) return res;
  // Otherwise...
  assert(false, "Could not find containing space");
  return NULL;
}


HeapWord* GenCollectedHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
      return _gens[i]->block_start(addr);
    }
  }
  if (perm_gen()->is_in_reserved(addr)) {
    assert(perm_gen()->is_in(addr),
           "addr should be in allocated part of perm gen");
    return perm_gen()->block_start(addr);
  }
  assert(false, "Some generation should contain the address");
  return NULL;
}

size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_size of address outside of heap");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
      return _gens[i]->block_size(addr);
    }
  }
  if (perm_gen()->is_in_reserved(addr)) {
    assert(perm_gen()->is_in(addr),
           "addr should be in allocated part of perm gen");
    return perm_gen()->block_size(addr);
  }
  assert(false, "Some generation should contain the address");
  return 0;
}

bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      return _gens[i]->block_is_obj(addr);
    }
  }
  if (perm_gen()->is_in_reserved(addr)) {
    return perm_gen()->block_is_obj(addr);
  }
  assert(false, "Some generation should contain the address");
  return false;
}

bool GenCollectedHeap::supports_tlab_allocation() const {
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      return true;
    }
  }
  return false;
}

size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->tlab_capacity();
    }
  }
  return result;
}

size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->unsafe_max_tlab_alloc();
    }
  }
  return result;
}

HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
  bool gc_overhead_limit_was_exceeded;
  return collector_policy()->mem_allocate_work(size /* size */,
                                               true /* is_tlab */,
                                               &gc_overhead_limit_was_exceeded);
}

// Requires "*prev_ptr" to be non-NULL.  Deletes and returns a block of
// minimal size from the list headed by "*prev_ptr".
static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
  bool first = true;
  size_t min_size = 0;   // "first" makes this conceptually infinite.
  ScratchBlock **smallest_ptr, *smallest;
  ScratchBlock  *cur = *prev_ptr;
  while (cur) {
    assert(*prev_ptr == cur, "just checking");
    if (first || cur->num_words < min_size) {
      smallest_ptr = prev_ptr;
      smallest     = cur;
      min_size     = smallest->num_words;
      first        = false;
    }
    prev_ptr = &cur->next;
    cur     =  cur->next;
  }
  smallest      = *smallest_ptr;
  *smallest_ptr = smallest->next;
  return smallest;
}

// Sort the scratch block list headed by "list" into decreasing size order,
// and set "list" to the result.
static void sort_scratch_list(ScratchBlock*& list) {
  ScratchBlock* sorted = NULL;
  ScratchBlock* unsorted = list;
  while (unsorted) {
    ScratchBlock *smallest = removeSmallestScratch(&unsorted);
    smallest->next  = sorted;
    sorted          = smallest;
  }
  list = sorted;
}

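// Ask every generation to contribute scratch space on behalf of the
// requesting generation (scratch is used as temporary copying space
// during collection), and return the blocks sorted largest-first.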
ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                               size_t max_alloc_words) {
  ScratchBlock* res = NULL;
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->contribute_scratch(res, requestor, max_alloc_words);
  }
  sort_scratch_list(res);
  return res;
}

void GenCollectedHeap::release_scratch() {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->reset_scratch();
  }
}

class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
  void do_generation(Generation* gen) {
    gen->prepare_for_verify();
  }
};

void GenCollectedHeap::prepare_for_verify() {
  ensure_parsability(false);        // no need to retire TLABs
  GenPrepareForVerifyClosure blk;
  generation_iterate(&blk, false);
  perm_gen()->prepare_for_verify();
}


void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                          bool old_to_young) {
  if (old_to_young) {
    for (int i = _n_gens-1; i >= 0; i--) {
      cl->do_generation(_gens[i]);
    }
  } else {
    for (int i = 0; i < _n_gens; i++) {
      cl->do_generation(_gens[i]);
    }
  }
}

void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->space_iterate(cl, true);
  }
  perm_gen()->space_iterate(cl, true);
}

bool GenCollectedHeap::is_maximal_no_gc() const {
  for (int i = 0; i < _n_gens; i++) {  // skip perm gen
    if (!_gens[i]->is_maximal_no_gc()) {
      return false;
    }
  }
  return true;
}

void GenCollectedHeap::save_marks() {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->save_marks();
  }
  perm_gen()->save_marks();
}

void GenCollectedHeap::compute_new_generation_sizes(int collectedGen) {
  for (int i = 0; i <= collectedGen; i++) {
    _gens[i]->compute_new_size();
  }
}

GenCollectedHeap* GenCollectedHeap::heap() {
  assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
  assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
  return _gch;
}


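// Prepare for a sliding compaction: walk the generations from oldest to
// youngest, each computing forwarding addresses into the shared
// CompactPoint so that live objects are forwarded as low in the heap as
// possible.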
void GenCollectedHeap::prepare_for_compaction() {
  Generation* scanning_gen = _gens[_n_gens-1];
  // Start by compacting into same gen.
  CompactPoint cp(scanning_gen, NULL, NULL);
  while (scanning_gen != NULL) {
    scanning_gen->prepare_for_compaction(&cp);
    scanning_gen = prev_gen(scanning_gen);
  }
}

GCStats* GenCollectedHeap::gc_stats(int level) const {
  return _gens[level]->gc_stats();
}

void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
  if (!silent) {
    gclog_or_tty->print("permgen ");
  }
  perm_gen()->verify();
  for (int i = _n_gens-1; i >= 0; i--) {
    Generation* g = _gens[i];
    if (!silent) {
      gclog_or_tty->print(g->name());
      gclog_or_tty->print(" ");
    }
    g->verify();
  }
  if (!silent) {
    gclog_or_tty->print("remset ");
  }
  rem_set()->verify();
}

void GenCollectedHeap::print_on(outputStream* st) const {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->print_on(st);
  }
  perm_gen()->print_on(st);
}

void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  if (workers() != NULL) {
    workers()->threads_do(tc);
  }
#ifndef SERIALGC
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::threads_do(tc);
  }
#endif // SERIALGC
}

void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
#ifndef SERIALGC
  if (UseParNewGC) {
    workers()->print_worker_threads_on(st);
  }
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::print_all_on(st);
  }
#endif // SERIALGC
}

void GenCollectedHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    get_gen(0)->print_summary_info();
  }
  if (TraceGen1Time) {
    get_gen(1)->print_summary_info();
  }
}

void GenCollectedHeap::print_heap_change(size_t prev_used) const {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

// Print perm gen info for full GC with the PrintGCDetails flag.
void GenCollectedHeap::print_perm_heap_change(size_t perm_prev_used) const {
  gclog_or_tty->print(", [%s :", perm_gen()->short_name());
  perm_gen()->print_heap_change(perm_prev_used);
  gclog_or_tty->print("]");
}

class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_prologue(_full);
  }
  GenGCPrologueClosure(bool full) : _full(full) {}
};

void GenCollectedHeap::gc_prologue(bool full) {
  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");

  always_do_update_barrier = false;
  // Fill TLAB's and such
  CollectedHeap::accumulate_statistics_all_tlabs();
  ensure_parsability(true);   // retire TLABs

  // Call allocation profiler
  AllocationProfiler::iterate_since_last_gc();
  // Walk generations
  GenGCPrologueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.
  perm_gen()->gc_prologue(full);
}

class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_epilogue(_full);
  }
  GenGCEpilogueClosure(bool full) : _full(full) {}
};

void GenCollectedHeap::gc_epilogue(bool full) {
#ifdef COMPILER2
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
  size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
  guarantee(actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
#endif /* COMPILER2 */

  resize_all_tlabs();

  GenGCEpilogueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.
  perm_gen()->gc_epilogue(full);

  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }

  always_do_update_barrier = UseConcMarkSweepGC;
}

#ifndef PRODUCT
class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->record_spaces_top();
  }
};

void GenCollectedHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    GenGCSaveTopsBeforeGCClosure blk;
    generation_iterate(&blk, false);  // not old-to-young.
    perm_gen()->record_spaces_top();
  }
}
#endif  // not PRODUCT

class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->ensure_parsability();
  }
};

void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  GenEnsureParsabilityClosure ep_cl;
  generation_iterate(&ep_cl, false);
  perm_gen()->ensure_parsability();
}

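// Promotion of obj into gen failed. Try, in order: allocation in each
// generation older than gen, then expand-and-allocate starting at gen
// and moving up. On success, copy the object's contents to the new
// location and return it; otherwise return NULL.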
oop GenCollectedHeap::handle_failed_promotion(Generation* gen,
                                              oop obj,
                                              size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  HeapWord* result = NULL;

  // First give each higher generation a chance to allocate the promoted object.
  Generation* allocator = next_gen(gen);
  if (allocator != NULL) {
    do {
      result = allocator->allocate(obj_size, false);
    } while (result == NULL && (allocator = next_gen(allocator)) != NULL);
  }

  if (result == NULL) {
    // Then give gen and higher generations a chance to expand and allocate the
    // object.
    do {
      result = gen->expand_and_allocate(obj_size, false);
    } while (result == NULL && (gen = next_gen(gen)) != NULL);
  }

  if (result != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
  }
  return oop(result);
}

class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
  jlong _time;   // in ms
  jlong _now;    // in ms

 public:
  GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }

  jlong time() { return _time; }

  void do_generation(Generation* gen) {
    _time = MIN2(_time, gen->time_of_last_gc(_now));
  }
};

jlong GenCollectedHeap::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  GenTimeOfLastGCClosure tolgc_cl(now);
  // iterate over generations getting the oldest
  // time that a generation was collected
  generation_iterate(&tolgc_cl, false);
  tolgc_cl.do_generation(perm_gen());

  // javaTimeNanos() is guaranteed to be monotonically non-decreasing
  // provided the underlying platform provides such a time source
  // (and it is bug free). So we still have to guard against getting
  // back a time later than 'now'.
  jlong retVal = now - tolgc_cl.time();
  if (retVal < 0) {
    NOT_PRODUCT(warning("time warp: "INT64_FORMAT, retVal);)
    return 0;
  }
  return retVal;
}