/*
 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/compactPermGen.hpp"
#include "memory/filemap.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.inline.hpp"
#include "memory/generationSpec.hpp"
#include "memory/permGen.hpp"
#include "memory/resourceArea.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "runtime/aprofiler.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "services/memTracker.hpp"
#include "utilities/vmError.hpp"
#include "utilities/workgroup.hpp"
#ifndef SERIALGC
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
#endif

GenCollectedHeap* GenCollectedHeap::_gch;
NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)

// The set of potentially parallel tasks in strong root scanning.
enum GCH_process_strong_roots_tasks {
  // We probably want to parallelize both of these internally, but for now...
  GCH_PS_younger_gens,
  // Leave this one last.
  GCH_PS_NumElements
};

GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
  SharedHeap(policy),
  _gen_policy(policy),
  _gen_process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
  _full_collections_completed(0)
{
  if (_gen_process_strong_tasks == NULL ||
      !_gen_process_strong_tasks->valid()) {
    vm_exit_during_initialization("Failed necessary allocation.");
  }
  assert(policy != NULL, "Sanity check");
  _preloading_shared_classes = false;
}

jint GenCollectedHeap::initialize() {
  CollectedHeap::pre_initialize();

  int i;
  _n_gens = gen_policy()->number_of_generations();

  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  // The heap must be at least as aligned as generations.
  size_t alignment = Generation::GenGrain;

  _gen_specs = gen_policy()->generations();
  PermanentGenerationSpec *perm_gen_spec =
                                collector_policy()->permanent_generation();

  // Make sure the sizes are all aligned.
  for (i = 0; i < _n_gens; i++) {
    _gen_specs[i]->align(alignment);
  }
  perm_gen_spec->align(alignment);

  // If we are dumping the heap, then allocate a wasted block of address
  // space in order to push the heap to a lower address.  This extra
  // address range allows for other (or larger) libraries to be loaded
  // without them occupying the space required for the shared spaces.

  if (DumpSharedSpaces) {
    uintx reserved = 0;
    uintx block_size = 64*1024*1024;
    while (reserved < SharedDummyBlockSize) {
      char* dummy = os::reserve_memory(block_size);
      reserved += block_size;
    }
  }

  // Allocate space for the heap.

  char* heap_address;
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  ReservedSpace heap_rs(0);

  heap_address = allocate(alignment, perm_gen_spec, &total_reserved,
                          &n_covered_regions, &heap_rs);

  if (UseSharedSpaces) {
    if (!heap_rs.is_reserved() || heap_address != heap_rs.base()) {
      if (heap_rs.is_reserved()) {
        heap_rs.release();
      }
      FileMapInfo* mapinfo = FileMapInfo::current_info();
      mapinfo->fail_continue("Unable to reserve shared region.");
      allocate(alignment, perm_gen_spec, &total_reserved, &n_covered_regions,
               &heap_rs);
    }
  }

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap.  (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start((HeapWord*)heap_rs.base());
  size_t actual_heap_size = heap_rs.size() - perm_gen_spec->misc_data_size()
                                           - perm_gen_spec->misc_code_size();
  _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));

  _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
  set_barrier_set(rem_set()->bs());

  _gch = this;

  for (i = 0; i < _n_gens; i++) {
    ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(),
                                               UseSharedSpaces, UseSharedSpaces);
    _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set());
    // tag generations in JavaHeap
    MemTracker::record_virtual_memory_type((address)this_rs.base(), mtJavaHeap);
    heap_rs = heap_rs.last_part(_gen_specs[i]->max_size());
  }
  _perm_gen = perm_gen_spec->init(heap_rs, PermSize, rem_set());
  // tag PermGen
  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtJavaHeap);

  clear_incremental_collection_failed();

#ifndef SERIALGC
  // If we are running CMS, create the collector responsible
  // for collecting the CMS generations.
  if (collector_policy()->is_concurrent_mark_sweep_policy()) {
    bool success = create_cms_collector();
    if (!success) return JNI_ENOMEM;
  }
#endif // SERIALGC

  return JNI_OK;
}
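
// Illustrative sketch (not compiled) of how initialize() above carves up the
// single reservation; addresses grow to the right and sizes are hypothetical:
//
//   +---------+---------+- ... -+----------+-----------+-----------+
//   |  gen 0  |  gen 1  |       | perm gen | misc data | misc code |
//   +---------+---------+- ... -+----------+-----------+-----------+
//   ^ heap_rs.base()                       ^ _reserved.end()
//
// Each generation takes the first_part() of the remaining reservation and the
// loop keeps the last_part(); the perm gen spec's misc data/code areas share
// the reservation but lie beyond _reserved.end() (see actual_heap_size above).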


char* GenCollectedHeap::allocate(size_t alignment,
                                 PermanentGenerationSpec* perm_gen_spec,
                                 size_t* _total_reserved,
                                 int* _n_covered_regions,
                                 ReservedSpace* heap_rs){
  // Now figure out the total size.
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  const size_t pageSize = UseLargePages ?
      os::large_page_size() : os::vm_page_size();

  for (int i = 0; i < _n_gens; i++) {
    total_reserved = add_and_check_overflow(total_reserved, _gen_specs[i]->max_size());
    n_covered_regions += _gen_specs[i]->n_covered_regions();
  }

  assert(total_reserved % pageSize == 0,
         err_msg("Gen size; total_reserved=" SIZE_FORMAT ", pageSize="
                 SIZE_FORMAT, total_reserved, pageSize));
  total_reserved = add_and_check_overflow(total_reserved, perm_gen_spec->max_size());
  assert(total_reserved % pageSize == 0,
         err_msg("Perm size; total_reserved=" SIZE_FORMAT ", pageSize="
                 SIZE_FORMAT ", perm gen max=" SIZE_FORMAT, total_reserved,
                 pageSize, perm_gen_spec->max_size()));

  n_covered_regions += perm_gen_spec->n_covered_regions();

  // Add the size of the data area which shares the same reserved area
  // as the heap, but which is not actually part of the heap.
  size_t misc = perm_gen_spec->misc_data_size() + perm_gen_spec->misc_code_size();
  total_reserved = add_and_check_overflow(total_reserved, misc);

  if (UseLargePages) {
    assert(total_reserved != 0, "total_reserved cannot be 0");
    total_reserved = round_up_and_check_overflow(total_reserved, os::large_page_size());
  }

  // Calculate the address at which the heap must reside in order for
  // the shared data to be at the required address.

  char* heap_address;
  if (UseSharedSpaces) {

    // Calculate the address of the first word beyond the heap.
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    int lr = CompactingPermGenGen::n_regions - 1;
    size_t capacity = align_size_up(mapinfo->space_capacity(lr), alignment);
    heap_address = mapinfo->region_base(lr) + capacity;

    // Calculate the address of the first word of the heap.
    heap_address -= total_reserved;
  } else {
    heap_address = NULL;  // any address will do.
    if (UseCompressedOops) {
      heap_address = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
      *_total_reserved = total_reserved;
      *_n_covered_regions = n_covered_regions;
      *heap_rs = ReservedHeapSpace(total_reserved, alignment,
                                   UseLargePages, heap_address);

      if (heap_address != NULL && !heap_rs->is_reserved()) {
        // Failed to reserve at specified address - the requested memory
        // region is taken already, for example, by 'java' launcher.
        // Try again to reserve the heap higher.
        heap_address = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
        *heap_rs = ReservedHeapSpace(total_reserved, alignment,
                                     UseLargePages, heap_address);

        if (heap_address != NULL && !heap_rs->is_reserved()) {
          // Failed to reserve at specified address again - give up.
          heap_address = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
          assert(heap_address == NULL, "");
          *heap_rs = ReservedHeapSpace(total_reserved, alignment,
                                       UseLargePages, heap_address);
        }
      }
      return heap_address;
    }
  }

  *_total_reserved = total_reserved;
  *_n_covered_regions = n_covered_regions;
  *heap_rs = ReservedHeapSpace(total_reserved, alignment,
                               UseLargePages, heap_address);

  return heap_address;
}
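
// Note on the UseCompressedOops path above (illustrative summary): the three
// placement attempts go from cheapest to most expensive narrow-oop decoding:
// UnscaledNarrowOop (the narrow oop is the address), then ZeroBasedNarrowOop
// (decoding needs only a shift), then HeapBasedNarrowOop, for which
// preferred_heap_base() returns NULL (any address; decoding needs base + shift).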


void GenCollectedHeap::post_initialize() {
  SharedHeap::post_initialize();
  TwoGenerationCollectorPolicy *policy =
    (TwoGenerationCollectorPolicy *)collector_policy();
  guarantee(policy->is_two_generation_policy(), "Illegal policy type");
  DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
  assert(def_new_gen->kind() == Generation::DefNew ||
         def_new_gen->kind() == Generation::ParNew ||
         def_new_gen->kind() == Generation::ASParNew,
         "Wrong generation kind");

  Generation* old_gen = get_gen(1);
  assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
         old_gen->kind() == Generation::ASConcurrentMarkSweep ||
         old_gen->kind() == Generation::MarkSweepCompact,
         "Wrong generation kind");

  policy->initialize_size_policy(def_new_gen->eden()->capacity(),
                                 old_gen->capacity(),
                                 def_new_gen->from()->capacity());
  policy->initialize_gc_policy_counters();
}

void GenCollectedHeap::ref_processing_init() {
  SharedHeap::ref_processing_init();
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor_init();
  }
}

size_t GenCollectedHeap::capacity() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->capacity();
  }
  return res;
}

size_t GenCollectedHeap::used() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->used();
  }
  return res;
}

// Save the "used_region" for generations at the given level and lower,
// and, if perm is true, for the perm gen.
void GenCollectedHeap::save_used_regions(int level, bool perm) {
  assert(level < _n_gens, "Illegal level parameter");
  for (int i = level; i >= 0; i--) {
    _gens[i]->save_used_region();
  }
  if (perm) {
    perm_gen()->save_used_region();
  }
}

size_t GenCollectedHeap::max_capacity() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->max_capacity();
  }
  return res;
}

// Update the _full_collections_completed counter
// at the end of a stop-world full GC.
unsigned int GenCollectedHeap::update_full_collections_completed() {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert(_full_collections_completed <= _total_full_collections,
         "Can't complete more collections than were started");
  _full_collections_completed = _total_full_collections;
  ml.notify_all();
  return _full_collections_completed;
}

// Update the _full_collections_completed counter, as appropriate,
// at the end of a concurrent GC cycle. Note the conditional update
// below to allow this method to be called by a concurrent collector
// without synchronizing in any manner with the VM thread (which
// may already have initiated a STW full collection "concurrently").
unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert((_full_collections_completed <= _total_full_collections) &&
         (count <= _total_full_collections),
         "Can't complete more collections than were started");
  if (count > _full_collections_completed) {
    _full_collections_completed = count;
    ml.notify_all();
  }
  return _full_collections_completed;
}
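
// For example (illustrative): a concurrent collector that observed
// _total_full_collections == N when its cycle began calls
// update_full_collections_completed(N) at the end of the cycle; if a
// stop-the-world full GC has meanwhile pushed the completed count past N,
// the conditional update above simply leaves the counter alone.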


#ifndef PRODUCT
// Override of memory state checking method in CollectedHeap:
// Some collectors (CMS for example) can't have badHeapWordVal written
// in the first two words of an object. (For instance, in the case of
// CMS these words hold state used to synchronize between certain
// (concurrent) GC steps and direct allocating mutators.)
// The skip_header_HeapWords() method below allows us to skip
// over the requisite number of HeapWords. Note that (for
// generational collectors) this means that those many words are
// skipped in each object, irrespective of the generation in which
// that object lives. The resultant loss of precision seems to be
// harmless and the pain of avoiding that imprecision appears somewhat
// higher than we are prepared to pay for such rudimentary debugging
// support.
void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
                                                         size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    // We are asked to check a size in HeapWords,
    // but the memory is mangled in juint words.
    juint* start = (juint*) (addr + skip_header_HeapWords());
    juint* end   = (juint*) (addr + size);
    for (juint* slot = start; slot < end; slot += 1) {
      assert(*slot == badHeapWordVal,
             "Found non-badHeapWordVal in pre-allocation check");
    }
  }
}
#endif

HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                               bool is_tlab,
                                               bool first_only) {
  HeapWord* res;
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->should_allocate(size, is_tlab)) {
      res = _gens[i]->allocate(size, is_tlab);
      if (res != NULL) return res;
      else if (first_only) break;
    }
  }
  // Otherwise...
  return NULL;
}
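
// Example (illustrative): in the usual two-generation setup a small request
// is offered to the young generation first and falls through to the old
// generation only if the young-gen allocate() fails; with first_only == true
// the loop gives up after the first generation whose should_allocate()
// accepted the request, even if its allocate() returned NULL.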

HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool* gc_overhead_limit_was_exceeded) {
  return collector_policy()->mem_allocate_work(size,
                                               false /* is_tlab */,
                                               gc_overhead_limit_was_exceeded);
}

bool GenCollectedHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_last_ditch_collection;
}

bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  return UseConcMarkSweepGC &&
         ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
          (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
}
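
// For example, running with -XX:+UseConcMarkSweepGC and
// -XX:+ExplicitGCInvokesConcurrent makes a System.gc() request (cause
// _java_lang_system_gc) take the mostly-concurrent path in collect()
// below instead of a stop-the-world full collection.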

void GenCollectedHeap::do_collection(bool   full,
                                     bool   clear_all_soft_refs,
                                     size_t size,
                                     bool   is_tlab,
                                     int    max_level) {
  bool prepared_for_verification = false;
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(my_thread->is_VM_thread() ||
         my_thread->is_ConcurrentGC_thread(),
         "incorrect thread type capability");
  assert(Heap_lock->is_locked(),
         "the requesting thread should have the Heap_lock");
  guarantee(!is_gc_active(), "collection is not reentrant");
  assert(max_level < n_gens(), "sanity check");

  if (GC_locker::check_active_before_gc()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                          collector_policy()->should_clear_all_soft_refs();

  ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());

  const size_t perm_prev_used = perm_gen()->used();

  print_heap_before_gc();

  {
    FlagSetting fl(_is_gc_active, true);

    bool complete = full && (max_level == (n_gens()-1));
    const char* gc_cause_prefix = complete ? "Full GC" : "GC";
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL);

    gc_prologue(complete);
    increment_total_collections(complete);

    size_t gch_prev_used = used();

    int starting_level = 0;
    if (full) {
      // Search for the oldest generation which will collect all younger
      // generations, and start collection loop there.
      for (int i = max_level; i >= 0; i--) {
        if (_gens[i]->full_collects_younger_generations()) {
          starting_level = i;
          break;
        }
      }
    }

    bool must_restore_marks_for_biased_locking = false;

    int max_level_collected = starting_level;
    for (int i = starting_level; i <= max_level; i++) {
      if (_gens[i]->should_collect(full, size, is_tlab)) {
        if (i == n_gens() - 1) {  // a major collection is to happen
          if (!complete) {
            // The full_collections increment was missed above.
            increment_total_full_collections();
          }
          pre_full_gc_dump(NULL);    // do any pre full gc dumps
        }
        // Timer for individual generations. Last argument is false: no CR
        // FIXME: We should try to start the timing earlier to cover more of the GC pause
        GCTraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, NULL);
        TraceCollectorStats tcs(_gens[i]->counters());
        TraceMemoryManagerStats tmms(_gens[i]->kind(), gc_cause());

        size_t prev_used = _gens[i]->used();
        _gens[i]->stat_record()->invocations++;
        _gens[i]->stat_record()->accumulated_time.start();

        // Must be done anew before each collection because
        // a previous collection will do mangling and will
        // change top of some spaces.
        record_gen_tops_before_GC();

        if (PrintGC && Verbose) {
          gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
                              i,
                              _gens[i]->stat_record()->invocations,
                              size*HeapWordSize);
        }

        if (VerifyBeforeGC && i >= VerifyGCLevel &&
            total_collections() >= VerifyGCStartAt) {
          HandleMark hm;  // Discard invalid handles created during verification
          if (!prepared_for_verification) {
            prepare_for_verify();
            prepared_for_verification = true;
          }
          gclog_or_tty->print(" VerifyBeforeGC:");
          Universe::verify();
        }
        COMPILER2_PRESENT(DerivedPointerTable::clear());

        if (!must_restore_marks_for_biased_locking &&
            _gens[i]->performs_in_place_marking()) {
          // We perform this mark word preservation work lazily
          // because it's only at this point that we know whether we
          // absolutely have to do it; we want to avoid doing it for
          // scavenge-only collections where it's unnecessary.
          must_restore_marks_for_biased_locking = true;
          BiasedLocking::preserve_marks();
        }

        // Do collection work
        {
          // Note on ref discovery: For what appear to be historical reasons,
          // GCH enables and disables (by enqueueing) refs discovery.
          // In the future this should be moved into the generation's
          // collect method so that ref discovery and enqueueing concerns
          // are local to a generation. The collect method could return
          // an appropriate indication in the case that notification on
          // the ref lock was needed. This will make the treatment of
          // weak refs more uniform (and indeed remove such concerns
          // from GCH). XXX

          HandleMark hm;  // Discard invalid handles created during gc
          save_marks();   // save marks for all gens
          // We want to discover references, but not process them yet.
          // This mode is disabled in process_discovered_references if the
          // generation does some collection work, or in
          // enqueue_discovered_references if the generation returns
          // without doing any work.
          ReferenceProcessor* rp = _gens[i]->ref_processor();
          // If the discovery of ("weak") refs in this generation is
          // atomic wrt other collectors in this configuration, we
          // are guaranteed to have empty discovered ref lists.
          if (rp->discovery_is_atomic()) {
            rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
            rp->setup_policy(do_clear_all_soft_refs);
          } else {
            // collect() below will enable discovery as appropriate
          }
          _gens[i]->collect(full, do_clear_all_soft_refs, size, is_tlab);
          if (!rp->enqueuing_is_done()) {
            rp->enqueue_discovered_references();
          } else {
            rp->set_enqueuing_is_done(false);
          }
          rp->verify_no_references_recorded();
        }
        max_level_collected = i;

        // Determine if allocation request was met.
        if (size > 0) {
          if (!is_tlab || _gens[i]->supports_tlab_allocation()) {
            if (size*HeapWordSize <= _gens[i]->unsafe_max_alloc_nogc()) {
              size = 0;
            }
          }
        }

        COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

        _gens[i]->stat_record()->accumulated_time.stop();

        update_gc_stats(i, full);

        if (VerifyAfterGC && i >= VerifyGCLevel &&
            total_collections() >= VerifyGCStartAt) {
          HandleMark hm;  // Discard invalid handles created during verification
          gclog_or_tty->print(" VerifyAfterGC:");
          Universe::verify();
        }

        if (PrintGCDetails) {
          gclog_or_tty->print(":");
          _gens[i]->print_heap_change(prev_used);
        }
      }
    }

    // Update "complete" boolean wrt what actually transpired --
    // for instance, a promotion failure could have led to
    // a whole heap collection.
    complete = complete || (max_level_collected == n_gens() - 1);

    if (complete) { // We did a "major" collection
      // FIXME: See comment at pre_full_gc_dump call
      post_full_gc_dump(NULL);   // do any post full gc dumps
    }

    if (PrintGCDetails) {
      print_heap_change(gch_prev_used);

      // Print perm gen info for full GC with PrintGCDetails flag.
      if (complete) {
        print_perm_heap_change(perm_prev_used);
      }
    }

    for (int j = max_level_collected; j >= 0; j -= 1) {
      // Adjust generation sizes.
      _gens[j]->compute_new_size();
    }

    if (complete) {
      // Ask the permanent generation to adjust size for full collections
      perm()->compute_new_size();
      update_full_collections_completed();
    }

    // Track memory usage and detect low memory after GC finishes
    MemoryService::track_memory_usage();

    gc_epilogue(complete);

    if (must_restore_marks_for_biased_locking) {
      BiasedLocking::restore_marks();
    }
  }

  AdaptiveSizePolicy* sp = gen_policy()->size_policy();
  AdaptiveSizePolicyOutput(sp, total_collections());

  print_heap_after_gc();

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif
}

HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  return collector_policy()->satisfy_failed_allocation(size, is_tlab);
}

void GenCollectedHeap::set_par_threads(uint t) {
  SharedHeap::set_par_threads(t);
  _gen_process_strong_tasks->set_n_threads(t);
}

void GenCollectedHeap::
gen_process_strong_roots(int level,
                         bool younger_gens_as_roots,
                         bool activate_scope,
                         bool collecting_perm_gen,
                         SharedHeap::ScanningOption so,
                         OopsInGenClosure* not_older_gens,
                         bool do_code_roots,
                         OopsInGenClosure* older_gens) {
  // General strong roots.

  if (!do_code_roots) {
    SharedHeap::process_strong_roots(activate_scope, collecting_perm_gen, so,
                                     not_older_gens, NULL, older_gens);
  } else {
    bool do_code_marking = (activate_scope || nmethod::oops_do_marking_is_active());
    CodeBlobToOopClosure code_roots(not_older_gens, /*do_marking=*/ do_code_marking);
    SharedHeap::process_strong_roots(activate_scope, collecting_perm_gen, so,
                                     not_older_gens, &code_roots, older_gens);
  }

  if (younger_gens_as_roots) {
    if (!_gen_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
      for (int i = 0; i < level; i++) {
        not_older_gens->set_generation(_gens[i]);
        _gens[i]->oop_iterate(not_older_gens);
      }
      not_older_gens->reset_generation();
    }
  }
  // When collection is parallel, all threads get to cooperate to do
  // older-gen scanning.
  for (int i = level+1; i < _n_gens; i++) {
    older_gens->set_generation(_gens[i]);
    rem_set()->younger_refs_iterate(_gens[i], older_gens);
    older_gens->reset_generation();
  }

  _gen_process_strong_tasks->all_tasks_completed();
}
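
// When several GC worker threads run this method in parallel, the
// is_task_claimed(GCH_PS_younger_gens) check above lets exactly one thread
// scan the younger generations as roots, while every thread cooperates on
// the younger_refs_iterate() pass over the older generations;
// all_tasks_completed() then marks this thread's pass done so the
// SubTasksDone state can be recycled for the next parallel phase.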

void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure,
                                              CodeBlobClosure* code_roots,
                                              OopClosure* non_root_closure) {
  SharedHeap::process_weak_roots(root_closure, code_roots, non_root_closure);
  // "Local" "weak" refs
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor()->weak_oops_do(root_closure);
  }
}

#define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
void GenCollectedHeap::                                                 \
oop_since_save_marks_iterate(int level,                                 \
                             OopClosureType* cur,                       \
                             OopClosureType* older) {                   \
  _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur);           \
  for (int i = level+1; i < n_gens(); i++) {                            \
    _gens[i]->oop_since_save_marks_iterate##nv_suffix(older);           \
  }                                                                     \
  perm_gen()->oop_since_save_marks_iterate##nv_suffix(older);           \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN

bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
  for (int i = level; i < _n_gens; i++) {
    if (!_gens[i]->no_allocs_since_save_marks()) return false;
  }
  return perm_gen()->no_allocs_since_save_marks();
}

bool GenCollectedHeap::supports_inline_contig_alloc() const {
  return _gens[0]->supports_inline_contig_alloc();
}

HeapWord** GenCollectedHeap::top_addr() const {
  return _gens[0]->top_addr();
}

HeapWord** GenCollectedHeap::end_addr() const {
  return _gens[0]->end_addr();
}

size_t GenCollectedHeap::unsafe_max_alloc() {
  return _gens[0]->unsafe_max_alloc_nogc();
}

// public collection interfaces

void GenCollectedHeap::collect(GCCause::Cause cause) {
  if (should_do_concurrent_full_gc(cause)) {
#ifndef SERIALGC
    // mostly concurrent full collection
    collect_mostly_concurrent(cause);
#else  // SERIALGC
    ShouldNotReachHere();
#endif // SERIALGC
  } else {
#ifdef ASSERT
    if (cause == GCCause::_scavenge_alot) {
      // minor collection only
      collect(cause, 0);
    } else {
      // Stop-the-world full collection
      collect(cause, n_gens() - 1);
    }
#else
    // Stop-the-world full collection
    collect(cause, n_gens() - 1);
#endif
  }
}

void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  MutexLocker ml(Heap_lock);
  collect_locked(cause, max_level);
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void GenCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump: {
      HandleMark hm;
      do_full_collection(false,         // don't clear all soft refs
                         n_gens() - 1);
      break;
    }
    default: // XXX FIX ME
      ShouldNotReachHere(); // Unexpected use of this function
  }
}

void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
  // The caller has the Heap_lock
  assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
  collect_locked(cause, n_gens() - 1);
}

// This is the private collection interface.
// The Heap_lock is expected to be held on entry.
void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
  if (_preloading_shared_classes) {
    report_out_of_shared_space(SharedPermGen);
  }
  // Read the GC count while holding the Heap_lock
  unsigned int gc_count_before      = total_collections();
  unsigned int full_gc_count_before = total_full_collections();
  {
    MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_level);
    VMThread::execute(&op);
  }
}

#ifndef SERIALGC
bool GenCollectedHeap::create_cms_collector() {

  assert(((_gens[1]->kind() == Generation::ConcurrentMarkSweep) ||
         (_gens[1]->kind() == Generation::ASConcurrentMarkSweep)) &&
         _perm_gen->as_gen()->kind() == Generation::ConcurrentMarkSweep,
         "Unexpected generation kinds");
  // Skip two header words in the block content verification
  NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
  CMSCollector* collector = new CMSCollector(
    (ConcurrentMarkSweepGeneration*)_gens[1],
    (ConcurrentMarkSweepGeneration*)_perm_gen->as_gen(),
    _rem_set->as_CardTableRS(),
    (ConcurrentMarkSweepPolicy*) collector_policy());

  if (collector == NULL || !collector->completed_initialization()) {
    if (collector) {
      delete collector;  // Be nice in embedded situation
    }
    vm_shutdown_during_initialization("Could not create CMS collector");
    return false;
  }
  return true;  // success
}

void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");

  MutexLocker ml(Heap_lock);
  // Read the GC counts while holding the Heap_lock
  unsigned int full_gc_count_before = total_full_collections();
  unsigned int gc_count_before      = total_collections();
  {
    MutexUnlocker mu(Heap_lock);
    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
    VMThread::execute(&op);
  }
}
#endif // SERIALGC


void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                          int max_level) {
  int local_max_level;
  if (!incremental_collection_will_fail(false /* don't consult_young */) &&
      gc_cause() == GCCause::_gc_locker) {
    local_max_level = 0;
  } else {
    local_max_level = max_level;
  }

  do_collection(true                 /* full */,
                clear_all_soft_refs  /* clear_all_soft_refs */,
                0                    /* size */,
                false                /* is_tlab */,
                local_max_level      /* max_level */);
  // Hack XXX FIX ME !!!
  // A scavenge may not have been attempted, or may have
  // been attempted and failed, because the old gen was too full.
  if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
      incremental_collection_will_fail(false /* don't consult_young */)) {
    if (PrintGCDetails) {
      gclog_or_tty->print_cr("GC locker: Trying a full collection "
                             "because scavenge failed");
    }
    // This time allow the old gen to be collected as well
    do_collection(true                 /* full */,
                  clear_all_soft_refs  /* clear_all_soft_refs */,
                  0                    /* size */,
                  false                /* is_tlab */,
                  n_gens() - 1         /* max_level */);
  }
}

bool GenCollectedHeap::is_in_young(oop p) {
  bool result = ((HeapWord*)p) < _gens[_n_gens - 1]->reserved().start();
  assert(result == _gens[0]->is_in_reserved(p),
         err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, (void*)p));
  return result;
}
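
// This address comparison relies on the generations being laid out in
// address order by initialize() -- young at the lowest addresses, then old,
// then perm (see also is_in_partial_collection() below) -- so any pointer
// below the oldest generation's reserved start must be in the young gen.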

// Returns "TRUE" iff "p" points into the committed areas of the heap.
bool GenCollectedHeap::is_in(const void* p) const {
  #ifndef ASSERT
  guarantee(VerifyBeforeGC   ||
            VerifyDuringGC   ||
            VerifyBeforeExit ||
            PrintAssembly    ||
            tty->count() != 0 ||   // already printing
            VerifyAfterGC    ||
            VMError::fatal_error_in_progress(), "too expensive");
  #endif
  // This might be sped up with a cache of the last generation that
  // answered yes.
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in(p)) return true;
  }
  if (_perm_gen->as_gen()->is_in(p)) return true;
  // Otherwise...
  return false;
}

#ifdef ASSERT
// Don't implement this by using is_in_young().  This method is used
// in some cases to check that is_in_young() is correct.
bool GenCollectedHeap::is_in_partial_collection(const void* p) {
  assert(is_in_reserved(p) || p == NULL,
    "Does not work if address is non-null and outside of the heap");
  // The order of the generations is young (low addr), old, perm (high addr)
  return p < _gens[_n_gens - 2]->reserved().end() && p != NULL;
}
#endif

void GenCollectedHeap::oop_iterate(OopClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->oop_iterate(cl);
  }
}

void GenCollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->oop_iterate(mr, cl);
  }
}

void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->object_iterate(cl);
  }
  perm_gen()->object_iterate(cl);
}

void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->safe_object_iterate(cl);
  }
  perm_gen()->safe_object_iterate(cl);
}

void GenCollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->object_iterate_since_last_GC(cl);
  }
}

Space* GenCollectedHeap::space_containing(const void* addr) const {
  for (int i = 0; i < _n_gens; i++) {
    Space* res = _gens[i]->space_containing(addr);
    if (res != NULL) return res;
  }
  Space* res = perm_gen()->space_containing(addr);
  if (res != NULL) return res;
  // Otherwise...
  assert(false, "Could not find containing space");
  return NULL;
}


HeapWord* GenCollectedHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
      return _gens[i]->block_start(addr);
    }
  }
  if (perm_gen()->is_in_reserved(addr)) {
    assert(perm_gen()->is_in(addr),
           "addr should be in allocated part of perm gen");
    return perm_gen()->block_start(addr);
  }
  assert(false, "Some generation should contain the address");
  return NULL;
}

size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_size of address outside of heap");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
      return _gens[i]->block_size(addr);
    }
  }
  if (perm_gen()->is_in_reserved(addr)) {
    assert(perm_gen()->is_in(addr),
           "addr should be in allocated part of perm gen");
    return perm_gen()->block_size(addr);
  }
  assert(false, "Some generation should contain the address");
  return 0;
}

bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      return _gens[i]->block_is_obj(addr);
    }
  }
  if (perm_gen()->is_in_reserved(addr)) {
    return perm_gen()->block_is_obj(addr);
  }
  assert(false, "Some generation should contain the address");
  return false;
}

bool GenCollectedHeap::supports_tlab_allocation() const {
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      return true;
    }
  }
  return false;
}

size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->tlab_capacity();
    }
  }
  return result;
}

size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->unsafe_max_tlab_alloc();
    }
  }
  return result;
}

HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
  bool gc_overhead_limit_was_exceeded;
  return collector_policy()->mem_allocate_work(size /* size */,
                                               true /* is_tlab */,
                                               &gc_overhead_limit_was_exceeded);
}

// Requires "*prev_ptr" to be non-NULL.  Deletes and returns a block of
// minimal size from the list headed by "*prev_ptr".
static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
  bool first = true;
  size_t min_size = 0;   // "first" makes this conceptually infinite.
  ScratchBlock **smallest_ptr, *smallest;
  ScratchBlock  *cur = *prev_ptr;
  while (cur) {
    assert(*prev_ptr == cur, "just checking");
    if (first || cur->num_words < min_size) {
      smallest_ptr = prev_ptr;
      smallest     = cur;
      min_size     = smallest->num_words;
      first        = false;
    }
    prev_ptr = &cur->next;
    cur     =  cur->next;
  }
  smallest      = *smallest_ptr;
  *smallest_ptr = smallest->next;
  return smallest;
}

// Sort the scratch block list headed by "list" into decreasing size order,
// and set "list" to the result.
static void sort_scratch_list(ScratchBlock*& list) {
  ScratchBlock* sorted = NULL;
  ScratchBlock* unsorted = list;
  while (unsorted) {
    ScratchBlock *smallest = removeSmallestScratch(&unsorted);
    smallest->next  = sorted;
    sorted          = smallest;
  }
  list = sorted;
}
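
// Worked example (hypothetical word counts): given blocks of sizes
// {8, 2, 5}, the loop removes the smallest remaining block each pass
// (2, then 5, then 8) and pushes it on the front of 'sorted', producing
// {8, 5, 2} -- a simple selection sort into decreasing size order.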

ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                               size_t max_alloc_words) {
  ScratchBlock* res = NULL;
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->contribute_scratch(res, requestor, max_alloc_words);
  }
  sort_scratch_list(res);
  return res;
}

void GenCollectedHeap::release_scratch() {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->reset_scratch();
  }
}

class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
  void do_generation(Generation* gen) {
    gen->prepare_for_verify();
  }
};

void GenCollectedHeap::prepare_for_verify() {
  ensure_parsability(false);        // no need to retire TLABs
  GenPrepareForVerifyClosure blk;
  generation_iterate(&blk, false);
  perm_gen()->prepare_for_verify();
}


void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                          bool old_to_young) {
  if (old_to_young) {
    for (int i = _n_gens-1; i >= 0; i--) {
      cl->do_generation(_gens[i]);
    }
  } else {
    for (int i = 0; i < _n_gens; i++) {
      cl->do_generation(_gens[i]);
    }
  }
}

void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->space_iterate(cl, true);
  }
  perm_gen()->space_iterate(cl, true);
}

bool GenCollectedHeap::is_maximal_no_gc() const {
  for (int i = 0; i < _n_gens; i++) {  // skip perm gen
    if (!_gens[i]->is_maximal_no_gc()) {
      return false;
    }
  }
  return true;
}

void GenCollectedHeap::save_marks() {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->save_marks();
  }
  perm_gen()->save_marks();
}

void GenCollectedHeap::compute_new_generation_sizes(int collectedGen) {
  for (int i = 0; i <= collectedGen; i++) {
    _gens[i]->compute_new_size();
  }
}

GenCollectedHeap* GenCollectedHeap::heap() {
  assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
  assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
  return _gch;
}


void GenCollectedHeap::prepare_for_compaction() {
  Generation* scanning_gen = _gens[_n_gens-1];
  // Start by compacting into same gen.
  CompactPoint cp(scanning_gen, NULL, NULL);
  while (scanning_gen != NULL) {
    scanning_gen->prepare_for_compaction(&cp);
    scanning_gen = prev_gen(scanning_gen);
  }
}

GCStats* GenCollectedHeap::gc_stats(int level) const {
  return _gens[level]->gc_stats();
}

void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
  if (!silent) {
    gclog_or_tty->print("permgen ");
  }
  perm_gen()->verify();
  for (int i = _n_gens-1; i >= 0; i--) {
    Generation* g = _gens[i];
    if (!silent) {
      gclog_or_tty->print(g->name());
      gclog_or_tty->print(" ");
    }
    g->verify();
  }
  if (!silent) {
    gclog_or_tty->print("remset ");
  }
  rem_set()->verify();
}

void GenCollectedHeap::print_on(outputStream* st) const {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->print_on(st);
  }
  perm_gen()->print_on(st);
}

void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  if (workers() != NULL) {
    workers()->threads_do(tc);
  }
#ifndef SERIALGC
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::threads_do(tc);
  }
#endif // SERIALGC
}

void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
#ifndef SERIALGC
  if (UseParNewGC) {
    workers()->print_worker_threads_on(st);
  }
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::print_all_on(st);
  }
#endif // SERIALGC
}

void GenCollectedHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    get_gen(0)->print_summary_info();
  }
  if (TraceGen1Time) {
    get_gen(1)->print_summary_info();
  }
}

void GenCollectedHeap::print_heap_change(size_t prev_used) const {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

// Print perm gen info for full GC with the PrintGCDetails flag.
void GenCollectedHeap::print_perm_heap_change(size_t perm_prev_used) const {
  gclog_or_tty->print(", [%s :", perm_gen()->short_name());
  perm_gen()->print_heap_change(perm_prev_used);
  gclog_or_tty->print("]");
}

class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_prologue(_full);
  }
  GenGCPrologueClosure(bool full) : _full(full) {}
};

void GenCollectedHeap::gc_prologue(bool full) {
  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");

  always_do_update_barrier = false;
  // Fill TLAB's and such
  CollectedHeap::accumulate_statistics_all_tlabs();
  ensure_parsability(true);   // retire TLABs

  // Call allocation profiler
  AllocationProfiler::iterate_since_last_gc();
  // Walk generations
  GenGCPrologueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.
  perm_gen()->gc_prologue(full);
}

class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_epilogue(_full);
  }
  GenGCEpilogueClosure(bool full) : _full(full) {}
};

void GenCollectedHeap::gc_epilogue(bool full) {
#ifdef COMPILER2
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
  size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
  guarantee(actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
#endif /* COMPILER2 */

  resize_all_tlabs();

  GenGCEpilogueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.
  perm_gen()->gc_epilogue(full);

  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }

  always_do_update_barrier = UseConcMarkSweepGC;
}

#ifndef PRODUCT
class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->record_spaces_top();
  }
};

void GenCollectedHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    GenGCSaveTopsBeforeGCClosure blk;
    generation_iterate(&blk, false);  // not old-to-young.
    perm_gen()->record_spaces_top();
  }
}
#endif  // not PRODUCT

class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->ensure_parsability();
  }
};

void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  GenEnsureParsabilityClosure ep_cl;
  generation_iterate(&ep_cl, false);
  perm_gen()->ensure_parsability();
}

oop GenCollectedHeap::handle_failed_promotion(Generation* gen,
                                              oop obj,
                                              size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  HeapWord* result = NULL;

  // First give each higher generation a chance to allocate the promoted object.
  Generation* allocator = next_gen(gen);
  if (allocator != NULL) {
    do {
      result = allocator->allocate(obj_size, false);
    } while (result == NULL && (allocator = next_gen(allocator)) != NULL);
  }

  if (result == NULL) {
    // Then give gen and higher generations a chance to expand and allocate the
    // object.
    do {
      result = gen->expand_and_allocate(obj_size, false);
    } while (result == NULL && (gen = next_gen(gen)) != NULL);
  }

  if (result != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
  }
  return oop(result);
}
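
// Illustrative flow: if 'gen' is the young generation in a two-generation
// heap, the object is first offered to the old generation via allocate();
// only when every higher generation refuses does the loop fall back to
// expand_and_allocate(), starting with 'gen' itself and moving up. A
// non-NULL result always holds a copy of the original object (see the
// Copy::aligned_disjoint_words call above).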

class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
  jlong _time;   // in ms
  jlong _now;    // in ms

 public:
  GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }

  jlong time() { return _time; }

  void do_generation(Generation* gen) {
    _time = MIN2(_time, gen->time_of_last_gc(_now));
  }
};
jlong GenCollectedHeap::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  GenTimeOfLastGCClosure tolgc_cl(now);
  // iterate over generations getting the oldest
  // time that a generation was collected
  generation_iterate(&tolgc_cl, false);
  tolgc_cl.do_generation(perm_gen());

  // javaTimeNanos() is guaranteed to be monotonically non-decreasing
  // provided the underlying platform provides such a time source
  // (and it is bug free). So we still have to guard against getting
  // back a time later than 'now'.
  jlong retVal = now - tolgc_cl.time();
  if (retVal < 0) {
    NOT_PRODUCT(warning("time warp: " INT64_FORMAT, retVal);)
    return 0;
  }
  return retVal;
}