src/share/vm/memory/genCollectedHeap.cpp

 111   size_t total_reserved = 0;
 112   ReservedSpace heap_rs;
 113 
 114   size_t heap_alignment = collector_policy()->heap_alignment();
 115 
 116   heap_address = allocate(heap_alignment, &total_reserved, &heap_rs);
 117 
 118   if (!heap_rs.is_reserved()) {
 119     vm_shutdown_during_initialization(
 120       "Could not reserve enough space for object heap");
 121     return JNI_ENOMEM;
 122   }
 123 
 124   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
 125 
 126   _rem_set = collector_policy()->create_rem_set(reserved_region());
 127   set_barrier_set(rem_set()->bs());
 128 
 129   _gch = this;
 130 
 131   for (i = 0; i < _n_gens; i++) {
 132     ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(), false, false);
 133     _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set());
 134     heap_rs = heap_rs.last_part(_gen_specs[i]->max_size());
 135   }
 136   clear_incremental_collection_failed();
 137 
 138 #if INCLUDE_ALL_GCS
 139   // If we are running CMS, create the collector responsible
 140   // for collecting the CMS generations.
 141   if (collector_policy()->is_concurrent_mark_sweep_policy()) {
 142     bool success = create_cms_collector();
 143     if (!success) return JNI_ENOMEM;
 144   }
 145 #endif // INCLUDE_ALL_GCS
 146 
 147   return JNI_OK;
 148 }
 149 
 150 
 151 char* GenCollectedHeap::allocate(size_t alignment,
 152                                  size_t* _total_reserved,
 153                                  ReservedSpace* heap_rs) {
 154   const char overflow_msg[] = "The size of the object heap + VM data exceeds "
 155     "the maximum representable size";
 156 
 157   // Now figure out the total size.
 158   size_t total_reserved = 0;
 159   const size_t pageSize = UseLargePages ?
 160       os::large_page_size() : os::vm_page_size();
 161 
 162   assert(alignment % pageSize == 0, "Must be");
 163 
 164   for (int i = 0; i < _n_gens; i++) {
 165     total_reserved += _gen_specs[i]->max_size();
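        // size_t arithmetic wraps on overflow, so if the running total just
        // became smaller than the term added above, the sum overflowed; the
        // check below exploits that to catch it without wider arithmetic.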
 166     if (total_reserved < _gen_specs[i]->max_size()) {
 167       vm_exit_during_initialization(overflow_msg);
 168     }
 169   }
 170   assert(total_reserved % alignment == 0,
 171          err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
 172                  SIZE_FORMAT, total_reserved, alignment));
 173 
 174   *_total_reserved = total_reserved;
 175 
 176   *heap_rs = Universe::reserve_heap(total_reserved, alignment);
 177   return heap_rs->base();
 178 }
 179 
 180 
 181 void GenCollectedHeap::post_initialize() {
 182   SharedHeap::post_initialize();
 183   GenCollectorPolicy *policy = (GenCollectorPolicy *)collector_policy();
 184   guarantee(policy->is_generation_policy(), "Illegal policy type");
 185   assert((get_gen(0)->kind() == Generation::DefNew) ||
 186          (get_gen(0)->kind() == Generation::ParNew),
 187     "Wrong youngest generation type");
 188   DefNewGeneration* def_new_gen = (DefNewGeneration*)get_gen(0);
 189 
 190   Generation* old_gen = get_gen(1);
 191   assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
 192          old_gen->kind() == Generation::MarkSweepCompact,
 193     "Wrong generation kind");
 194 
 195   policy->initialize_size_policy(def_new_gen->eden()->capacity(),
 196                                  old_gen->capacity(),
 197                                  def_new_gen->from()->capacity());
 198   policy->initialize_gc_policy_counters();
 199 }
 200 
 201 void GenCollectedHeap::ref_processing_init() {
 202   SharedHeap::ref_processing_init();
 203   for (int i = 0; i < _n_gens; i++) {
 204     _gens[i]->ref_processor_init();
 205   }
 206 }
 207 
 208 size_t GenCollectedHeap::capacity() const {
 209   size_t res = 0;
 210   for (int i = 0; i < _n_gens; i++) {
 211     res += _gens[i]->capacity();
 212   }
 213   return res;
 214 }
 215 
 216 size_t GenCollectedHeap::used() const {
 217   size_t res = 0;
 218   for (int i = 0; i < _n_gens; i++) {
 219     res += _gens[i]->used();
 220   }
 221   return res;
 222 }
 223 
 224 // Save the "used_region" for generations at and below the given level.
 225 void GenCollectedHeap::save_used_regions(int level) {
 226   assert(level < _n_gens, "Illegal level parameter");
 227   for (int i = level; i >= 0; i--) {
 228     _gens[i]->save_used_region();
 229   }
 230 }
 231 
 232 size_t GenCollectedHeap::max_capacity() const {
 233   size_t res = 0;
 234   for (int i = 0; i < _n_gens; i++) {
 235     res += _gens[i]->max_capacity();
 236   }
 237   return res;
 238 }
 239 
 240 // Update the _full_collections_completed counter
 241 // at the end of a stop-world full GC.
 242 unsigned int GenCollectedHeap::update_full_collections_completed() {
 243   MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
 244   assert(_full_collections_completed <= _total_full_collections,
 245          "Can't complete more collections than were started");
 246   _full_collections_completed = _total_full_collections;
 247   ml.notify_all();
 248   return _full_collections_completed;
 249 }
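// Note: the notify_all() above wakes any thread blocked on FullGCCount_lock
// waiting for the completed-collection counter to advance (for example, a VM
// operation epilogue that must not return before a full cycle has finished).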
 250 
 251 // Update the _full_collections_completed counter, as appropriate,
 252 // at the end of a concurrent GC cycle. Note the conditional update
 253 // below to allow this method to be called by a concurrent collector
 254 // without synchronizing in any manner with the VM thread (which
 255 // may already have initiated a STW full collection "concurrently").
 256 unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) {
 257   MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);


 281 // higher than we are prepared to pay for such rudimentary debugging
 282 // support.
 283 void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
 284                                                          size_t size) {
 285   if (CheckMemoryInitialization && ZapUnusedHeapArea) {
 286     // We are asked to check a size in HeapWords,
 287     // but the memory is mangled in juint words.
 288     juint* start = (juint*) (addr + skip_header_HeapWords());
 289     juint* end   = (juint*) (addr + size);
 290     for (juint* slot = start; slot < end; slot += 1) {
 291       assert(*slot == badHeapWordVal,
 292              "Found non badHeapWordValue in pre-allocation check");
 293     }
 294   }
 295 }
 296 #endif
 297 
 298 HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
 299                                                bool is_tlab,
 300                                                bool first_only) {
 301   HeapWord* res;
 302   for (int i = 0; i < _n_gens; i++) {
 303     if (_gens[i]->should_allocate(size, is_tlab)) {
 304       res = _gens[i]->allocate(size, is_tlab);
 305       if (res != NULL) return res;
 306       else if (first_only) break;
 307     }
 308   }
 309   // Otherwise...
 310   return NULL;
 311 }
 312 
 313 HeapWord* GenCollectedHeap::mem_allocate(size_t size,
 314                                          bool* gc_overhead_limit_was_exceeded) {
 315   return collector_policy()->mem_allocate_work(size,
 316                                                false /* is_tlab */,
 317                                                gc_overhead_limit_was_exceeded);
 318 }
 319 
 320 bool GenCollectedHeap::must_clear_all_soft_refs() {
 321   return _gc_cause == GCCause::_last_ditch_collection;
 322 }
 323 
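// For example, running with -XX:+UseConcMarkSweepGC and
// -XX:+ExplicitGCInvokesConcurrent turns a System.gc() request into a
// concurrent CMS cycle rather than a stop-the-world full collection;
// GCLockerInvokesConcurrent does the same for GC-locker-induced collections.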
 324 bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
 325   return UseConcMarkSweepGC &&
 326          ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
 327           (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
 328 }
 329 
 330 void GenCollectedHeap::do_collection(bool  full,
 331                                      bool   clear_all_soft_refs,
 332                                      size_t size,
 333                                      bool   is_tlab,
 334                                      int    max_level) {
 335   bool prepared_for_verification = false;
 336   ResourceMark rm;
 337   DEBUG_ONLY(Thread* my_thread = Thread::current();)
 338 
 339   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
 340   assert(my_thread->is_VM_thread() ||
 341          my_thread->is_ConcurrentGC_thread(),
 342          "incorrect thread type capability");
 343   assert(Heap_lock->is_locked(),
 344          "the requesting thread should have the Heap_lock");
 345   guarantee(!is_gc_active(), "collection is not reentrant");
 346   assert(max_level < n_gens(), "sanity check");
 347 
 348   if (GC_locker::check_active_before_gc()) {
 349     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
 350   }
 351 
 352   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
 353                           collector_policy()->should_clear_all_soft_refs();
 354 
 355   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
 356 
 357   const size_t metadata_prev_used = MetaspaceAux::used_bytes();
 358 
 359   print_heap_before_gc();
 360 
 361   {
 362     FlagSetting fl(_is_gc_active, true);
 363 
 364     bool complete = full && (max_level == (n_gens()-1));
 365     const char* gc_cause_prefix = complete ? "Full GC" : "GC";
 366     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
 367     // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
 368     // so we can assume here that the next GC id is what we want.
 369     GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek());
 370 
 371     gc_prologue(complete);
 372     increment_total_collections(complete);
 373 
 374     size_t gch_prev_used = used();
 375 
 376     int starting_level = 0;
 377     if (full) {
 378       // Search for the oldest generation which will collect all younger
 379       // generations, and start collection loop there.
 380       for (int i = max_level; i >= 0; i--) {
 381         if (_gens[i]->full_collects_younger_generations()) {
 382           starting_level = i;
 383           break;
 384         }
 385       }
 386     }
 387 
 388     bool must_restore_marks_for_biased_locking = false;
 389 
 390     int max_level_collected = starting_level;
 391     for (int i = starting_level; i <= max_level; i++) {
 392       if (_gens[i]->should_collect(full, size, is_tlab)) {
 393         if (i == n_gens() - 1) {  // a major collection is to happen
 394           if (!complete) {
 395             // The full_collections increment was missed above.
 396             increment_total_full_collections();
 397           }
 398           pre_full_gc_dump(NULL);    // do any pre full gc dumps
 399         }
 400         // Timer for individual generations. Last argument is false: no CR
 401         // FIXME: We should try to start the timing earlier to cover more of the GC pause
 402         // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
 403         // so we can assume here that the next GC id is what we want.
 404         GCTraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, NULL, GCId::peek());
 405         TraceCollectorStats tcs(_gens[i]->counters());
 406         TraceMemoryManagerStats tmms(_gens[i]->kind(), gc_cause());
 407 
 408         size_t prev_used = _gens[i]->used();
 409         _gens[i]->stat_record()->invocations++;
 410         _gens[i]->stat_record()->accumulated_time.start();
 411 
 412         // Must be done anew before each collection because
 413         // a previous collection will do mangling and will
 414         // change top of some spaces.
 415         record_gen_tops_before_GC();
 416 
 417         if (PrintGC && Verbose) {
 418           gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
 419                      i,
 420                      _gens[i]->stat_record()->invocations,
 421                      size*HeapWordSize);
 422         }
 423 
 424         if (VerifyBeforeGC && i >= VerifyGCLevel &&
 425             total_collections() >= VerifyGCStartAt) {
 426           HandleMark hm;  // Discard invalid handles created during verification
 427           if (!prepared_for_verification) {
 428             prepare_for_verify();
 429             prepared_for_verification = true;
 430           }
 431           Universe::verify(" VerifyBeforeGC:");
 432         }
 433         COMPILER2_PRESENT(DerivedPointerTable::clear());
 434 
 435         if (!must_restore_marks_for_biased_locking &&
 436             _gens[i]->performs_in_place_marking()) {
 437           // We perform this mark word preservation work lazily
 438           // because it's only at this point that we know whether we
 439           // absolutely have to do it; we want to avoid doing it for
 440           // scavenge-only collections where it's unnecessary
 441           must_restore_marks_for_biased_locking = true;
 442           BiasedLocking::preserve_marks();
 443         }
 444 
 445         // Do collection work
 446         {
 447           // Note on ref discovery: For what appear to be historical reasons,
 448           // GCH enables and disables (by enqueuing) refs discovery.
 449           // In the future this should be moved into the generation's
 450           // collect method so that ref discovery and enqueueing concerns
 451           // are local to a generation. The collect method could return
 452           // an appropriate indication in the case that notification on
 453           // the ref lock was needed. This will make the treatment of
 454           // weak refs more uniform (and indeed remove such concerns
 455           // from GCH). XXX
 456 
 457           HandleMark hm;  // Discard invalid handles created during gc
 458           save_marks();   // save marks for all gens
 459           // We want to discover references, but not process them yet.
 460           // This mode is disabled in process_discovered_references if the
 461           // generation does some collection work, or in
 462           // enqueue_discovered_references if the generation returns
 463           // without doing any work.
 464           ReferenceProcessor* rp = _gens[i]->ref_processor();
 465           // If the discovery of ("weak") refs in this generation is
 466           // atomic wrt other collectors in this configuration, we
 467           // are guaranteed to have empty discovered ref lists.
 468           if (rp->discovery_is_atomic()) {
 469             rp->enable_discovery();
 470             rp->setup_policy(do_clear_all_soft_refs);
 471           } else {
 472             // collect() below will enable discovery as appropriate
 473           }
 474           _gens[i]->collect(full, do_clear_all_soft_refs, size, is_tlab);
 475           if (!rp->enqueuing_is_done()) {
 476             rp->enqueue_discovered_references();
 477           } else {
 478             rp->set_enqueuing_is_done(false);
 479           }
 480           rp->verify_no_references_recorded();
 481         }
 482         max_level_collected = i;
 483 
 484         // Determine if allocation request was met.
 485         if (size > 0) {
 486           if (!is_tlab || _gens[i]->supports_tlab_allocation()) {
 487             if (size*HeapWordSize <= _gens[i]->unsafe_max_alloc_nogc()) {
 488               size = 0;
 489             }
 490           }
 491         }
 492 
 493         COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 494 
 495         _gens[i]->stat_record()->accumulated_time.stop();
 496 
 497         update_gc_stats(i, full);
 498 
 499         if (VerifyAfterGC && i >= VerifyGCLevel &&
 500             total_collections() >= VerifyGCStartAt) {
 501           HandleMark hm;  // Discard invalid handles created during verification
 502           Universe::verify(" VerifyAfterGC:");
 503         }
 504 
 505         if (PrintGCDetails) {
 506           gclog_or_tty->print(":");
 507           _gens[i]->print_heap_change(prev_used);
 508         }
 509       }
 510     }
 511 
 512     // Update "complete" boolean wrt what actually transpired --
 513     // for instance, a promotion failure could have led to
 514     // a whole heap collection.
 515     complete = complete || (max_level_collected == n_gens() - 1);
 516 
 517     if (complete) { // We did a "major" collection
 518       // FIXME: See comment at pre_full_gc_dump call
 519       post_full_gc_dump(NULL);   // do any post full gc dumps
 520     }
 521 
 522     if (PrintGCDetails) {
 523       print_heap_change(gch_prev_used);
 524 
 525       // Print metaspace info for full GC with PrintGCDetails flag.
 526       if (complete) {
 527         MetaspaceAux::print_metaspace_change(metadata_prev_used);
 528       }
 529     }
 530 
 531     for (int j = max_level_collected; j >= 0; j -= 1) {
 532       // Adjust generation sizes.
 533       _gens[j]->compute_new_size();
 534     }
 535 
 536     if (complete) {
 537       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
 538       ClassLoaderDataGraph::purge();
 539       MetaspaceAux::verify_metrics();
 540       // Resize the metaspace capacity after full collections
 541       MetaspaceGC::compute_new_size();
 542       update_full_collections_completed();
 543     }
 544 
 545     // Track memory usage and detect low memory after GC finishes
 546     MemoryService::track_memory_usage();
 547 
 548     gc_epilogue(complete);
 549 
 550     if (must_restore_marks_for_biased_locking) {
 551       BiasedLocking::restore_marks();
 552     }
 553   }
 554 


 571 void GenCollectedHeap::
 572 gen_process_roots(int level,
 573                   bool younger_gens_as_roots,
 574                   bool activate_scope,
 575                   SharedHeap::ScanningOption so,
 576                   OopsInGenClosure* not_older_gens,
 577                   OopsInGenClosure* weak_roots,
 578                   OopsInGenClosure* older_gens,
 579                   CLDClosure* cld_closure,
 580                   CLDClosure* weak_cld_closure,
 581                   CodeBlobClosure* code_closure) {
 582 
 583   // General roots.
 584   SharedHeap::process_roots(activate_scope, so,
 585                             not_older_gens, weak_roots,
 586                             cld_closure, weak_cld_closure,
 587                             code_closure);
 588 
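  // In a parallel collection every worker thread enters this method; the
  // task-claiming below ensures exactly one of them scans the younger
  // generations as roots, while the rem-set scan further down is work all
  // workers cooperate on.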
 589   if (younger_gens_as_roots) {
 590     if (!_gen_process_roots_tasks->is_task_claimed(GCH_PS_younger_gens)) {
 591       for (int i = 0; i < level; i++) {
 592         not_older_gens->set_generation(_gens[i]);
 593         _gens[i]->oop_iterate(not_older_gens);
 594       }
 595       not_older_gens->reset_generation();
 596     }
 597   }
 598   // When collection is parallel, all threads get to cooperate to do
 599   // older-gen scanning.
 600   for (int i = level+1; i < _n_gens; i++) {
 601     older_gens->set_generation(_gens[i]);
 602     rem_set()->younger_refs_iterate(_gens[i], older_gens);
 603     older_gens->reset_generation();
 604   }
 605 
 606   _gen_process_roots_tasks->all_tasks_completed();
 607 }
 608 
 609 void GenCollectedHeap::
 610 gen_process_roots(int level,
 611                   bool younger_gens_as_roots,
 612                   bool activate_scope,
 613                   SharedHeap::ScanningOption so,
 614                   bool only_strong_roots,
 615                   OopsInGenClosure* not_older_gens,
 616                   OopsInGenClosure* older_gens,
 617                   CLDClosure* cld_closure) {
 618 
 619   const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots;
 620 
 621   bool is_moving_collection = false;
 622   if (level == 0 || is_adjust_phase) {
 623     // young collections are always moving
 624     is_moving_collection = true;
 625   }
 626 
 627   MarkingCodeBlobClosure mark_code_closure(not_older_gens, is_moving_collection);
 628   CodeBlobClosure* code_closure = &mark_code_closure;
 629 
 630   gen_process_roots(level,
 631                     younger_gens_as_roots,
 632                     activate_scope, so,
 633                     not_older_gens, only_strong_roots ? NULL : not_older_gens,
 634                     older_gens,
 635                     cld_closure, only_strong_roots ? NULL : cld_closure,
 636                     code_closure);
 637 
 638 }
 639 
 640 void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
 641   SharedHeap::process_weak_roots(root_closure);
 642   // "Local" "weak" refs
 643   for (int i = 0; i < _n_gens; i++) {
 644     _gens[i]->ref_processor()->weak_oops_do(root_closure);
 645   }
 646 }
 647 
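// The macro below is instantiated once per concrete closure type via
// ALL_SINCE_SAVE_MARKS_CLOSURES; the nv_suffix selects the specialized
// ("non-virtual") iterator variants so the per-oop calls can be devirtualized.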
 648 #define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
 649 void GenCollectedHeap::                                                 \
 650 oop_since_save_marks_iterate(int level,                                 \
 651                              OopClosureType* cur,                       \
 652                              OopClosureType* older) {                   \
 653   _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur);           \
 654   for (int i = level+1; i < n_gens(); i++) {                            \
 655     _gens[i]->oop_since_save_marks_iterate##nv_suffix(older);           \
 656   }                                                                     \
 657 }
 658 
 659 ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)
 660 
 661 #undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN
 662 
 663 bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
 664   for (int i = level; i < _n_gens; i++) {
 665     if (!_gens[i]->no_allocs_since_save_marks()) return false;
 666   }
 667   return true;
 668 }
 669 
 670 bool GenCollectedHeap::supports_inline_contig_alloc() const {
 671   return _gens[0]->supports_inline_contig_alloc();
 672 }
 673 
 674 HeapWord** GenCollectedHeap::top_addr() const {
 675   return _gens[0]->top_addr();
 676 }
 677 
 678 HeapWord** GenCollectedHeap::end_addr() const {
 679   return _gens[0]->end_addr();
 680 }
 681 
 682 // public collection interfaces
 683 
 684 void GenCollectedHeap::collect(GCCause::Cause cause) {
 685   if (should_do_concurrent_full_gc(cause)) {
 686 #if INCLUDE_ALL_GCS
 687     // mostly concurrent full collection
 688     collect_mostly_concurrent(cause);
 689 #else  // INCLUDE_ALL_GCS
 690     ShouldNotReachHere();
 691 #endif // INCLUDE_ALL_GCS
 692   } else if (cause == GCCause::_wb_young_gc) {
 693     // minor collection for WhiteBox API
 694     collect(cause, 0);
 695   } else {
 696 #ifdef ASSERT
 697   if (cause == GCCause::_scavenge_alot) {
 698     // minor collection only
 699     collect(cause, 0);


 722 }
 723 
 724 // this is the private collection interface
 725 // The Heap_lock is expected to be held on entry.
 726 
 727 void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
 728   // Read the GC count while holding the Heap_lock
 729   unsigned int gc_count_before      = total_collections();
 730   unsigned int full_gc_count_before = total_full_collections();
 731   {
 732     MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
 733     VM_GenCollectFull op(gc_count_before, full_gc_count_before,
 734                          cause, max_level);
 735     VMThread::execute(&op);
 736   }
 737 }
 738 
 739 #if INCLUDE_ALL_GCS
 740 bool GenCollectedHeap::create_cms_collector() {
 741 
 742   assert(_gens[1]->kind() == Generation::ConcurrentMarkSweep,
 743          "Unexpected generation kinds");
 744   // Skip two header words in the block content verification
 745   NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
 746   CMSCollector* collector = new CMSCollector(
 747     (ConcurrentMarkSweepGeneration*)_gens[1],
 748     _rem_set->as_CardTableRS(),
 749     (ConcurrentMarkSweepPolicy*) collector_policy());
 750 
 751   if (collector == NULL || !collector->completed_initialization()) {
 752     if (collector) {
 753       delete collector;  // Be nice in embedded situation
 754     }
 755     vm_shutdown_during_initialization("Could not create CMS collector");
 756     return false;
 757   }
 758   return true;  // success
 759 }
 760 
 761 void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
 762   assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");
 763 
 764   MutexLocker ml(Heap_lock);
 765   // Read the GC counts while holding the Heap_lock
 766   unsigned int full_gc_count_before = total_full_collections();
 767   unsigned int gc_count_before      = total_collections();


 794                 local_max_level      /* max_level */);
 795   // Hack XXX FIX ME !!!
 796   // A scavenge may not have been attempted, or may have
 797   // been attempted and failed, because the old gen was too full
 798   if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
 799       incremental_collection_will_fail(false /* don't consult_young */)) {
 800     if (PrintGCDetails) {
 801       gclog_or_tty->print_cr("GC locker: Trying a full collection "
 802                              "because scavenge failed");
 803     }
 804     // This time allow the old gen to be collected as well
 805     do_collection(true                 /* full */,
 806                   clear_all_soft_refs  /* clear_all_soft_refs */,
 807                   0                    /* size */,
 808                   false                /* is_tlab */,
 809                   n_gens() - 1         /* max_level */);
 810   }
 811 }
 812 
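// The comparison below relies on the heap layout set up in initialize():
// the generations are carved from one contiguous reservation with the young
// generation at the lower addresses, so any address below the start of the
// oldest generation's reserved region must belong to a younger one.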
 813 bool GenCollectedHeap::is_in_young(oop p) {
 814   bool result = ((HeapWord*)p) < _gens[_n_gens - 1]->reserved().start();
 815   assert(result == _gens[0]->is_in_reserved(p),
 816          err_msg("incorrect test - result=%d, p=" INTPTR_FORMAT, result, p2i((void*)p)));
 817   return result;
 818 }
 819 
 820 // Returns "TRUE" iff "p" points into the committed areas of the heap.
 821 bool GenCollectedHeap::is_in(const void* p) const {
 822   #ifndef ASSERT
 823   guarantee(VerifyBeforeGC      ||
 824             VerifyDuringGC      ||
 825             VerifyBeforeExit    ||
 826             VerifyDuringStartup ||
 827             PrintAssembly       ||
 828             tty->count() != 0   ||   // already printing
 829             VerifyAfterGC       ||
 830             VMError::fatal_error_in_progress(), "too expensive");
 831 
 832   #endif
 833   // This might be sped up with a cache of the last generation that
 834   // answered yes.
 835   for (int i = 0; i < _n_gens; i++) {
 836     if (_gens[i]->is_in(p)) return true;
 837   }
 838   // Otherwise...
 839   return false;
 840 }
 841 
 842 #ifdef ASSERT
 843 // Don't implement this by using is_in_young().  This method is used
 844 // in some cases to check that is_in_young() is correct.
 845 bool GenCollectedHeap::is_in_partial_collection(const void* p) {
 846   assert(is_in_reserved(p) || p == NULL,
 847     "Does not work if address is non-null and outside of the heap");
 848   return p < _gens[_n_gens - 2]->reserved().end() && p != NULL;
 849 }
 850 #endif
 851 
 852 void GenCollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
 853   for (int i = 0; i < _n_gens; i++) {
 854     _gens[i]->oop_iterate(cl);
 855   }
 856 }
 857 
 858 void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
 859   for (int i = 0; i < _n_gens; i++) {
 860     _gens[i]->object_iterate(cl);
 861   }
 862 }
 863 
 864 void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
 865   for (int i = 0; i < _n_gens; i++) {
 866     _gens[i]->safe_object_iterate(cl);
 867   }
 868 }
 869 
 870 Space* GenCollectedHeap::space_containing(const void* addr) const {
 871   for (int i = 0; i < _n_gens; i++) {
 872     Space* res = _gens[i]->space_containing(addr);
 873     if (res != NULL) return res;
 874   }
 875   // Otherwise...
 876   assert(false, "Could not find containing space");
 877   return NULL;
 878 }
 879 
 880 
 881 HeapWord* GenCollectedHeap::block_start(const void* addr) const {
 882   assert(is_in_reserved(addr), "block_start of address outside of heap");
 883   for (int i = 0; i < _n_gens; i++) {
 884     if (_gens[i]->is_in_reserved(addr)) {
 885       assert(_gens[i]->is_in(addr),
 886              "addr should be in allocated part of generation");
 887       return _gens[i]->block_start(addr);
 888     }
 889   }
 890   assert(false, "Some generation should contain the address");
 891   return NULL;
 892 }
 893 
 894 size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
 895   assert(is_in_reserved(addr), "block_size of address outside of heap");
 896   for (int i = 0; i < _n_gens; i++) {
 897     if (_gens[i]->is_in_reserved(addr)) {
 898       assert(_gens[i]->is_in(addr),
 899              "addr should be in allocated part of generation");
 900       return _gens[i]->block_size(addr);
 901     }
 902   }
 903   assert(false, "Some generation should contain the address");
 904   return 0;
 905 }
 906 
 907 bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
 908   assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
 909   assert(block_start(addr) == addr, "addr must be a block start");
 910   for (int i = 0; i < _n_gens; i++) {
 911     if (_gens[i]->is_in_reserved(addr)) {
 912       return _gens[i]->block_is_obj(addr);
 913     }
 914   }
 915   assert(false, "Some generation should contain the address");
 916   return false;
 917 }
 918 
 919 bool GenCollectedHeap::supports_tlab_allocation() const {
 920   for (int i = 0; i < _n_gens; i += 1) {
 921     if (_gens[i]->supports_tlab_allocation()) {
 922       return true;
 923     }
 924   }
 925   return false;
 926 }
 927 
 928 size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
 929   size_t result = 0;
 930   for (int i = 0; i < _n_gens; i += 1) {
 931     if (_gens[i]->supports_tlab_allocation()) {
 932       result += _gens[i]->tlab_capacity();
 933     }
 934   }
 935   return result;
 936 }
 937 
 938 size_t GenCollectedHeap::tlab_used(Thread* thr) const {
 939   size_t result = 0;
 940   for (int i = 0; i < _n_gens; i += 1) {
 941     if (_gens[i]->supports_tlab_allocation()) {
 942       result += _gens[i]->tlab_used();
 943     }
 944   }
 945   return result;
 946 }
 947 
 948 size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
 949   size_t result = 0;
 950   for (int i = 0; i < _n_gens; i += 1) {
 951     if (_gens[i]->supports_tlab_allocation()) {
 952       result += _gens[i]->unsafe_max_tlab_alloc();
 953     }
 954   }
 955   return result;
 956 }
 957 
 958 HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
 959   bool gc_overhead_limit_was_exceeded;
 960   return collector_policy()->mem_allocate_work(size /* size */,
 961                                                true /* is_tlab */,
 962                                                &gc_overhead_limit_was_exceeded);
 963 }
 964 
 965 // Requires "*prev_ptr" to be non-NULL.  Deletes and returns a block of
 966 // minimal size from the list headed by "*prev_ptr".
 967 static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
 968   bool first = true;
 969   size_t min_size = 0;   // "first" makes this conceptually infinite.
 970   ScratchBlock **smallest_ptr, *smallest;
 971   ScratchBlock  *cur = *prev_ptr;
 972   while (cur) {
 973     assert(*prev_ptr == cur, "just checking");
 974     if (first || cur->num_words < min_size) {
 975       smallest_ptr = prev_ptr;


 984   *smallest_ptr = smallest->next;
 985   return smallest;
 986 }
 987 
 988 // Sort the scratch block list headed by "list" into decreasing size order,
 989 // and set "list" to the result.
 990 static void sort_scratch_list(ScratchBlock*& list) {
 991   ScratchBlock* sorted = NULL;
 992   ScratchBlock* unsorted = list;
 993   while (unsorted) {
 994     ScratchBlock *smallest = removeSmallestScratch(&unsorted);
 995     smallest->next  = sorted;
 996     sorted          = smallest;
 997   }
 998   list = sorted;
 999 }
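// This amounts to a selection sort: each pass unlinks the smallest remaining
// block and pushes it onto the head of the sorted list, so the result ends
// up in decreasing size order.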
1000 
1001 ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
1002                                                size_t max_alloc_words) {
1003   ScratchBlock* res = NULL;
1004   for (int i = 0; i < _n_gens; i++) {
1005     _gens[i]->contribute_scratch(res, requestor, max_alloc_words);
1006   }
1007   sort_scratch_list(res);
1008   return res;
1009 }
1010 
1011 void GenCollectedHeap::release_scratch() {
1012   for (int i = 0; i < _n_gens; i++) {
1013     _gens[i]->reset_scratch();
1014   }
1015 }
1016 
1017 class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
1018   void do_generation(Generation* gen) {
1019     gen->prepare_for_verify();
1020   }
1021 };
1022 
1023 void GenCollectedHeap::prepare_for_verify() {
1024   ensure_parsability(false);        // no need to retire TLABs
1025   GenPrepareForVerifyClosure blk;
1026   generation_iterate(&blk, false);
1027 }
1028 
1029 
1030 void GenCollectedHeap::generation_iterate(GenClosure* cl,
1031                                           bool old_to_young) {
1032   if (old_to_young) {
1033     for (int i = _n_gens-1; i >= 0; i--) {
1034       cl->do_generation(_gens[i]);
1035     }
1036   } else {
1037     for (int i = 0; i < _n_gens; i++) {
1038       cl->do_generation(_gens[i]);
1039     }
1040   }
1041 }
1042 
1043 void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
1044   for (int i = 0; i < _n_gens; i++) {
1045     _gens[i]->space_iterate(cl, true);
1046   }
1047 }
1048 
1049 bool GenCollectedHeap::is_maximal_no_gc() const {
1050   for (int i = 0; i < _n_gens; i++) {
1051     if (!_gens[i]->is_maximal_no_gc()) {
1052       return false;
1053     }
1054   }
1055   return true;
1056 }
1057 
1058 void GenCollectedHeap::save_marks() {
1059   for (int i = 0; i < _n_gens; i++) {
1060     _gens[i]->save_marks();
1061   }
1062 }
1063 
1064 GenCollectedHeap* GenCollectedHeap::heap() {
1065   assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
1066   assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
1067   return _gch;
1068 }
1069 
1070 
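// Both generations share one CompactPoint: the old generation first computes
// forwarding addresses within its own space, and the young generation then
// continues from wherever that left off, so survivors can compact downward
// into the old generation.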
1071 void GenCollectedHeap::prepare_for_compaction() {
1072   guarantee(_n_gens == 2, "Wrong number of generations");
1073   Generation* old_gen = _gens[1];
1074   // Start by compacting into same gen.
1075   CompactPoint cp(old_gen);
1076   old_gen->prepare_for_compaction(&cp);
1077   Generation* young_gen = _gens[0];
1078   young_gen->prepare_for_compaction(&cp);
1079 }
1080 
1081 GCStats* GenCollectedHeap::gc_stats(int level) const {
1082   return _gens[level]->gc_stats();
1083 }
1084 
1085 void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
1086   for (int i = _n_gens-1; i >= 0; i--) {
1087     Generation* g = _gens[i];
1088     if (!silent) {
1089       gclog_or_tty->print("%s", g->name());
1090       gclog_or_tty->print(" ");
1091     }
1092     g->verify();
1093   }
1094   if (!silent) {
1095     gclog_or_tty->print("remset ");
1096   }
1097   rem_set()->verify();
1098 }
1099 
1100 void GenCollectedHeap::print_on(outputStream* st) const {
1101   for (int i = 0; i < _n_gens; i++) {
1102     _gens[i]->print_on(st);
1103   }
1104   MetaspaceAux::print_on(st);
1105 }
1106 
1107 void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
1108   if (workers() != NULL) {
1109     workers()->threads_do(tc);
1110   }
1111 #if INCLUDE_ALL_GCS
1112   if (UseConcMarkSweepGC) {
1113     ConcurrentMarkSweepThread::threads_do(tc);
1114   }
1115 #endif // INCLUDE_ALL_GCS
1116 }
1117 
1118 void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
1119 #if INCLUDE_ALL_GCS
1120   if (UseConcMarkSweepGC) {
1121     workers()->print_worker_threads_on(st);
1122     ConcurrentMarkSweepThread::print_all_on(st);
1123   }

src/share/vm/memory/genCollectedHeap.cpp

 111   size_t total_reserved = 0;
 112   ReservedSpace heap_rs;
 113 
 114   size_t heap_alignment = collector_policy()->heap_alignment();
 115 
 116   heap_address = allocate(heap_alignment, &total_reserved, &heap_rs);
 117 
 118   if (!heap_rs.is_reserved()) {
 119     vm_shutdown_during_initialization(
 120       "Could not reserve enough space for object heap");
 121     return JNI_ENOMEM;
 122   }
 123 
 124   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
 125 
 126   _rem_set = collector_policy()->create_rem_set(reserved_region());
 127   set_barrier_set(rem_set()->bs());
 128 
 129   _gch = this;
 130 
 131   ReservedSpace young_rs = heap_rs.first_part(_gen_specs[0]->max_size(), false, false);
 132   _young_gen = _gen_specs[0]->init(young_rs, 0, rem_set());
 133   heap_rs = heap_rs.last_part(_gen_specs[0]->max_size());
 134 
 135   ReservedSpace old_rs = heap_rs.first_part(_gen_specs[1]->max_size(), false, false);
 136   _old_gen = _gen_specs[1]->init(old_rs, 1, rem_set());
 137   heap_rs = heap_rs.last_part(_gen_specs[1]->max_size());
 138   clear_incremental_collection_failed();
 139 
 140 #if INCLUDE_ALL_GCS
 141   // If we are running CMS, create the collector responsible
 142   // for collecting the CMS generations.
 143   if (collector_policy()->is_concurrent_mark_sweep_policy()) {
 144     bool success = create_cms_collector();
 145     if (!success) return JNI_ENOMEM;
 146   }
 147 #endif // INCLUDE_ALL_GCS
 148 
 149   return JNI_OK;
 150 }
 151 

 152 char* GenCollectedHeap::allocate(size_t alignment,
 153                                  size_t* _total_reserved,
 154                                  ReservedSpace* heap_rs) {
 155   const char overflow_msg[] = "The size of the object heap + VM data exceeds "
 156     "the maximum representable size";
 157 
 158   // Now figure out the total size.
 159   size_t total_reserved = 0;
 160   const size_t pageSize = UseLargePages ?
 161       os::large_page_size() : os::vm_page_size();
 162 
 163   assert(alignment % pageSize == 0, "Must be");
 164 
 165   for (int i = 0; i < _n_gens; i++) {
 166     total_reserved += _gen_specs[i]->max_size();
 167     if (total_reserved < _gen_specs[i]->max_size()) {
 168       vm_exit_during_initialization(overflow_msg);
 169     }
 170   }
 171   assert(total_reserved % alignment == 0,
 172          err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
 173                  SIZE_FORMAT, total_reserved, alignment));
 174 
 175   *_total_reserved = total_reserved;
 176 
 177   *heap_rs = Universe::reserve_heap(total_reserved, alignment);
 178   return heap_rs->base();
 179 }
 180 

 181 void GenCollectedHeap::post_initialize() {
 182   SharedHeap::post_initialize();
 183   GenCollectorPolicy *policy = (GenCollectorPolicy *)collector_policy();
 184   guarantee(policy->is_generation_policy(), "Illegal policy type");
 185   assert((get_gen(0)->kind() == Generation::DefNew) ||
 186          (get_gen(0)->kind() == Generation::ParNew),
 187     "Wrong youngest generation type");
 188   DefNewGeneration* def_new_gen = (DefNewGeneration*)get_gen(0);
 189 
 190   Generation* old_gen = get_gen(1);
 191   assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
 192          old_gen->kind() == Generation::MarkSweepCompact,
 193     "Wrong generation kind");
 194 
 195   policy->initialize_size_policy(def_new_gen->eden()->capacity(),
 196                                  old_gen->capacity(),
 197                                  def_new_gen->from()->capacity());
 198   policy->initialize_gc_policy_counters();
 199 }
 200 
 201 void GenCollectedHeap::ref_processing_init() {
 202   SharedHeap::ref_processing_init();
 203   _young_gen->ref_processor_init();
 204   _old_gen->ref_processor_init();
 205 }
 206 
 207 size_t GenCollectedHeap::capacity() const {
 208   return _young_gen->capacity() + _old_gen->capacity();
 209 }
 210 
 211 size_t GenCollectedHeap::used() const {
 212   return _young_gen->used() + _old_gen->used();
 213 }
 214 
 215 // Save the "used_region" for generations at and below the given level.
 216 void GenCollectedHeap::save_used_regions(int level) {
 217   assert(level < _n_gens, "Illegal level parameter");
 218   if (level == 1) {
 219     _old_gen->save_used_region();
 220   }
 221   _young_gen->save_used_region();
 222 }
 223 
 224 size_t GenCollectedHeap::max_capacity() const {
 225   return _young_gen->max_capacity() + _old_gen->max_capacity();
 226 }
 227 
 228 // Update the _full_collections_completed counter
 229 // at the end of a stop-world full GC.
 230 unsigned int GenCollectedHeap::update_full_collections_completed() {
 231   MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
 232   assert(_full_collections_completed <= _total_full_collections,
 233          "Can't complete more collections than were started");
 234   _full_collections_completed = _total_full_collections;
 235   ml.notify_all();
 236   return _full_collections_completed;
 237 }
 238 
 239 // Update the _full_collections_completed counter, as appropriate,
 240 // at the end of a concurrent GC cycle. Note the conditional update
 241 // below to allow this method to be called by a concurrent collector
 242 // without synchronizing in any manner with the VM thread (which
 243 // may already have initiated a STW full collection "concurrently").
 244 unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) {
 245   MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);


 269 // higher than we are prepared to pay for such rudimentary debugging
 270 // support.
 271 void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
 272                                                          size_t size) {
 273   if (CheckMemoryInitialization && ZapUnusedHeapArea) {
 274     // We are asked to check a size in HeapWords,
 275     // but the memory is mangled in juint words.
 276     juint* start = (juint*) (addr + skip_header_HeapWords());
 277     juint* end   = (juint*) (addr + size);
 278     for (juint* slot = start; slot < end; slot += 1) {
 279       assert(*slot == badHeapWordVal,
 280              "Found non badHeapWordValue in pre-allocation check");
 281     }
 282   }
 283 }
 284 #endif
 285 
 286 HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
 287                                                bool is_tlab,
 288                                                bool first_only) {
 289   HeapWord* res = NULL;
 290 
 291   if (_young_gen->should_allocate(size, is_tlab)) {
 292     res = _young_gen->allocate(size, is_tlab);
 293     if (res != NULL || first_only) {
 294       return res;
 295     }
 296   }
 297 
 298   if (_old_gen->should_allocate(size, is_tlab)) {
 299     res = _old_gen->allocate(size, is_tlab);
 300   }
 301 
 302   return res;
 303 }
 304 
 305 HeapWord* GenCollectedHeap::mem_allocate(size_t size,
 306                                          bool* gc_overhead_limit_was_exceeded) {
 307   return collector_policy()->mem_allocate_work(size,
 308                                                false /* is_tlab */,
 309                                                gc_overhead_limit_was_exceeded);
 310 }
 311 
 312 bool GenCollectedHeap::must_clear_all_soft_refs() {
 313   return _gc_cause == GCCause::_last_ditch_collection;
 314 }
 315 
 316 bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
 317   return UseConcMarkSweepGC &&
 318          ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
 319           (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
 320 }
 321 
 322 void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
 323                                           bool is_tlab, bool run_verification, bool clear_soft_refs,
 324                                           bool restore_marks_for_biased_locking) {
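  // This is the per-generation payload hoisted out of the old do_collection()
  // loop; decisions that used to depend on the loop index (which generation to
  // collect, whether to verify, whether to preserve mark words for biased
  // locking) now arrive as explicit arguments.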
 325   // Timer for individual generations. Last argument is false: no CR
 326   // FIXME: We should try to start the timing earlier to cover more of the GC pause
 327   // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
 328   // so we can assume here that the next GC id is what we want.
 329   GCTraceTime t1(gen->short_name(), PrintGCDetails, false, NULL, GCId::peek());
 330   TraceCollectorStats tcs(gen->counters());
 331   TraceMemoryManagerStats tmms(gen->kind(), gc_cause());
 332 
 333   size_t prev_used = gen->used();
 334   gen->stat_record()->invocations++;
 335   gen->stat_record()->accumulated_time.start();
 336 
 337   // Must be done anew before each collection because
 338   // a previous collection will do mangling and will
 339   // change top of some spaces.
 340   record_gen_tops_before_GC();
 341 
 342   if (PrintGC && Verbose) {
 343     gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
 344                         gen->level(),
 345                         gen->stat_record()->invocations,
 346                         size * HeapWordSize);
 347   }
 348 
 349   if (run_verification && VerifyBeforeGC) {
 350     HandleMark hm;  // Discard invalid handles created during verification
 351     Universe::verify(" VerifyBeforeGC:");
 352   }
 353   COMPILER2_PRESENT(DerivedPointerTable::clear());
 354 
 355   if (restore_marks_for_biased_locking) {
 356     // We perform this mark word preservation work lazily
 357     // because it's only at this point that we know whether we
 358     // absolutely have to do it; we want to avoid doing it for
 359     // scavenge-only collections where it's unnecessary
 360     BiasedLocking::preserve_marks();
 361   }
 362 
 363   // Do collection work
 364   {
 365     // Note on ref discovery: For what appear to be historical reasons,
 366     // GCH enables and disables (by enqueuing) refs discovery.
 367     // In the future this should be moved into the generation's
 368     // collect method so that ref discovery and enqueueing concerns
 369     // are local to a generation. The collect method could return
 370     // an appropriate indication in the case that notification on
 371     // the ref lock was needed. This will make the treatment of
 372     // weak refs more uniform (and indeed remove such concerns
 373     // from GCH). XXX
 374 
 375     HandleMark hm;  // Discard invalid handles created during gc
 376     save_marks();   // save marks for all gens
 377     // We want to discover references, but not process them yet.
 378     // This mode is disabled in process_discovered_references if the
 379     // generation does some collection work, or in
 380     // enqueue_discovered_references if the generation returns
 381     // without doing any work.
 382     ReferenceProcessor* rp = gen->ref_processor();
 383     // If the discovery of ("weak") refs in this generation is
 384     // atomic wrt other collectors in this configuration, we
 385     // are guaranteed to have empty discovered ref lists.
 386     if (rp->discovery_is_atomic()) {
 387       rp->enable_discovery();
 388       rp->setup_policy(clear_soft_refs);
 389     } else {
 390       // collect() below will enable discovery as appropriate
 391     }
 392     gen->collect(full, clear_soft_refs, size, is_tlab);
 393     if (!rp->enqueuing_is_done()) {
 394       rp->enqueue_discovered_references();
 395     } else {
 396       rp->set_enqueuing_is_done(false);
 397     }
 398     rp->verify_no_references_recorded();
 399   }
 400 
 401   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 402 
 403   gen->stat_record()->accumulated_time.stop();
 404 
 405   update_gc_stats(gen->level(), full);
 406 
 407   if (run_verification && VerifyAfterGC) {
 408     HandleMark hm;  // Discard invalid handles created during verification
 409     Universe::verify(" VerifyAfterGC:");
 410   }
 411 
 412   if (PrintGCDetails) {
 413     gclog_or_tty->print(":");
 414     gen->print_heap_change(prev_used);
 415   }
 416 }
 417 
 418 void GenCollectedHeap::do_collection(bool   full,
 419                                      bool   clear_all_soft_refs,
 420                                      size_t size,
 421                                      bool   is_tlab,
 422                                      int    max_level) {
 423   ResourceMark rm;
 424   DEBUG_ONLY(Thread* my_thread = Thread::current();)
 425 
 426   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
 427   assert(my_thread->is_VM_thread() ||
 428          my_thread->is_ConcurrentGC_thread(),
 429          "incorrect thread type capability");
 430   assert(Heap_lock->is_locked(),
 431          "the requesting thread should have the Heap_lock");
 432   guarantee(!is_gc_active(), "collection is not reentrant");
 433   assert(max_level < n_gens(), "sanity check");
 434 
 435   if (GC_locker::check_active_before_gc()) {
 436     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
 437   }
 438 
 439   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
 440                           collector_policy()->should_clear_all_soft_refs();
 441 
 442   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
 443 
 444   const size_t metadata_prev_used = MetaspaceAux::used_bytes();
 445 
 446   print_heap_before_gc();
 447 
 448   {
 449     FlagSetting fl(_is_gc_active, true);
 450 
 451     bool complete = full && (max_level == (n_gens()-1));
 452     const char* gc_cause_prefix = complete ? "Full GC" : "GC";
 453     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
 454     // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
 455     // so we can assume here that the next GC id is what we want.
 456     GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek());
 457 
 458     gc_prologue(complete);
 459     increment_total_collections(complete);
 460 
 461     size_t gch_prev_used = used();
 462     bool must_restore_marks_for_biased_locking = false;
 463     bool run_verification = total_collections() >= VerifyGCStartAt;
 464 
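    // run_verification carries the total_collections() >= VerifyGCStartAt half
    // of the old per-level verification check; the VerifyGCLevel half is
    // applied per generation at the two collect_generation call sites below.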
 465     bool prepared_for_verification = false;
 466     int max_level_collected = 0;
 467     if (!(max_level == 1 && full && _old_gen->full_collects_younger_generations()) &&
 468         _young_gen->should_collect(full, size, is_tlab)) {
 469       if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
 470         prepare_for_verify();
 471         prepared_for_verification = true;
 472       }
 473       if (_young_gen->performs_in_place_marking()) {
 474         must_restore_marks_for_biased_locking = true;
 475       }
 476       collect_generation(_young_gen,
 477                          full,
 478                          size,
 479                          is_tlab,
 480                          run_verification && VerifyGCLevel <= 0,
 481                          do_clear_all_soft_refs,
 482                          must_restore_marks_for_biased_locking);
 483 
 484       if (size > 0 && (!is_tlab || _young_gen->supports_tlab_allocation()) &&
 485           size * HeapWordSize <= _young_gen->unsafe_max_alloc_nogc()) {
 486         // Allocation request was met by young GC.
 487         size = 0;
 488       }
 489     }
 490 
 491     if (max_level == 1 && _old_gen->should_collect(full, size, is_tlab)) {
 492       if (!complete) {
 493         // The full_collections increment was missed above.
 494         increment_total_full_collections();
 495       }
 496       pre_full_gc_dump(NULL);    // do any pre full gc dumps
 497       if (!prepared_for_verification && run_verification &&
 498           VerifyGCLevel <= 1 && VerifyBeforeGC) {
 499         prepare_for_verify();
 500       }
 501       assert(_old_gen->performs_in_place_marking(), "All old generations do in place marking");
 502       collect_generation(_old_gen,
 503                          full,
 504                          size,
 505                          is_tlab,
 506                          run_verification && VerifyGCLevel <= 1,
 507                          do_clear_all_soft_refs,
 508                          !must_restore_marks_for_biased_locking);
 509 
 510       must_restore_marks_for_biased_locking = true;
 511       max_level_collected = 1;
 512     }
 513 
 514     // Update "complete" boolean wrt what actually transpired --
 515     // for instance, a promotion failure could have led to
 516     // a whole heap collection.
 517     complete = complete || (max_level_collected == n_gens() - 1);
 518 
 519     if (complete) { // We did a "major" collection
 520       // FIXME: See comment at pre_full_gc_dump call
 521       post_full_gc_dump(NULL);   // do any post full gc dumps
 522     }
 523 
 524     if (PrintGCDetails) {
 525       print_heap_change(gch_prev_used);
 526 
 527       // Print metaspace info for full GC with PrintGCDetails flag.
 528       if (complete) {
 529         MetaspaceAux::print_metaspace_change(metadata_prev_used);
 530       }
 531     }
 532 
 533     // Adjust generation sizes.
 534     if (max_level_collected == 1) {
 535       _old_gen->compute_new_size();
 536     }
 537     _young_gen->compute_new_size();
 538 
 539     if (complete) {
 540       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
 541       ClassLoaderDataGraph::purge();
 542       MetaspaceAux::verify_metrics();
 543       // Resize the metaspace capacity after full collections
 544       MetaspaceGC::compute_new_size();
 545       update_full_collections_completed();
 546     }
 547 
 548     // Track memory usage and detect low memory after GC finishes
 549     MemoryService::track_memory_usage();
 550 
 551     gc_epilogue(complete);
 552 
 553     if (must_restore_marks_for_biased_locking) {
 554       BiasedLocking::restore_marks();
 555     }
 556   }
 557 


 574 void GenCollectedHeap::
 575 gen_process_roots(int level,
 576                   bool younger_gens_as_roots,
 577                   bool activate_scope,
 578                   SharedHeap::ScanningOption so,
 579                   OopsInGenClosure* not_older_gens,
 580                   OopsInGenClosure* weak_roots,
 581                   OopsInGenClosure* older_gens,
 582                   CLDClosure* cld_closure,
 583                   CLDClosure* weak_cld_closure,
 584                   CodeBlobClosure* code_closure) {
 585 
 586   // General roots.
 587   SharedHeap::process_roots(activate_scope, so,
 588                             not_older_gens, weak_roots,
 589                             cld_closure, weak_cld_closure,
 590                             code_closure);
 591 
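  // With exactly two generations, level == 1 (collecting the old gen) means
  // the young generation is scanned as a strong-root source, and level == 0
  // (collecting the young gen) means the old generation's remembered set is
  // scanned below instead.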
 592   if (younger_gens_as_roots) {
 593     if (!_gen_process_roots_tasks->is_task_claimed(GCH_PS_younger_gens)) {
 594       if (level == 1) {
 595         not_older_gens->set_generation(_young_gen);
 596         _young_gen->oop_iterate(not_older_gens);
 597       }
 598       not_older_gens->reset_generation();
 599     }
 600   }
 601   // When collection is parallel, all threads get to cooperate to do
 602   // older-gen scanning.
 603   if (level == 0) {
 604     older_gens->set_generation(_old_gen);
 605     rem_set()->younger_refs_iterate(_old_gen, older_gens);
 606     older_gens->reset_generation();
 607   }
 608 
 609   _gen_process_roots_tasks->all_tasks_completed();
 610 }
 611 
 612 void GenCollectedHeap::
 613 gen_process_roots(int level,
 614                   bool younger_gens_as_roots,
 615                   bool activate_scope,
 616                   SharedHeap::ScanningOption so,
 617                   bool only_strong_roots,
 618                   OopsInGenClosure* not_older_gens,
 619                   OopsInGenClosure* older_gens,
 620                   CLDClosure* cld_closure) {
 621 
 622   const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots;
 623 
 624   bool is_moving_collection = false;
 625   if (level == 0 || is_adjust_phase) {
 626     // young collections are always moving
 627     is_moving_collection = true;
 628   }
 629 
 630   MarkingCodeBlobClosure mark_code_closure(not_older_gens, is_moving_collection);
 631   CodeBlobClosure* code_closure = &mark_code_closure;
 632 
 633   gen_process_roots(level,
 634                     younger_gens_as_roots,
 635                     activate_scope, so,
 636                     not_older_gens, only_strong_roots ? NULL : not_older_gens,
 637                     older_gens,
 638                     cld_closure, only_strong_roots ? NULL : cld_closure,
 639                     code_closure);
 640 
 641 }
 642 
 643 void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
 644   SharedHeap::process_weak_roots(root_closure);
 645   // "Local" "weak" refs
 646   _young_gen->ref_processor()->weak_oops_do(root_closure);
 647   _old_gen->ref_processor()->weak_oops_do(root_closure);
 648 }
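// Each generation owns its own ReferenceProcessor; the calls above visit
// the weak oops on the discovered-reference lists, e.g. so that a full
// collection can adjust them after marking has decided what moves.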
 649 
 650 #define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
 651 void GenCollectedHeap::                                                 \
 652 oop_since_save_marks_iterate(int level,                                 \
 653                              OopClosureType* cur,                       \
 654                              OopClosureType* older) {                   \
 655   if (level == 0) {                                                     \
 656     _young_gen->oop_since_save_marks_iterate##nv_suffix(cur);           \
 657     _old_gen->oop_since_save_marks_iterate##nv_suffix(older);           \
 658   } else {                                                              \
 659     _old_gen->oop_since_save_marks_iterate##nv_suffix(cur);             \
 660   }                                                                     \
 661 }
 662 
 663 ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)
 664 
 665 #undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN
 666 
 667 bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
 668   if (level == 0 && !_young_gen->no_allocs_since_save_marks()) {
 669     return false;
 670   }
 671   return _old_gen->no_allocs_since_save_marks();
 672 }
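// Used together with the since-save-marks iteration above: a collector
// repeatedly scans objects allocated or promoted since the last
// save_marks() and loops until this returns true, i.e. until the
// transitive closure over newly copied objects is complete.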
 673 
 674 bool GenCollectedHeap::supports_inline_contig_alloc() const {
 675   return _young_gen->supports_inline_contig_alloc();
 676 }
 677 
 678 HeapWord** GenCollectedHeap::top_addr() const {
 679   return _young_gen->top_addr();
 680 }
 681 
 682 HeapWord** GenCollectedHeap::end_addr() const {
 683   return _young_gen->end_addr();
 684 }
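// top_addr()/end_addr() exist so that compiled code can inline the common
// allocation case as a bump of the young generation's top pointer.  A
// rough C++ sketch of that fast path (illustrative only -- the real
// version is emitted by the JIT and uses atomic updates):
//
//   HeapWord* obj     = *gch->top_addr();
//   HeapWord* new_top = obj + size_in_words;
//   if (new_top <= *gch->end_addr()) {
//     *gch->top_addr() = new_top;   // a CAS in the multi-threaded case
//     return obj;
//   }
//   return NULL;                    // fall back to the slow path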
 685 
 686 // public collection interfaces
 687 
 688 void GenCollectedHeap::collect(GCCause::Cause cause) {
 689   if (should_do_concurrent_full_gc(cause)) {
 690 #if INCLUDE_ALL_GCS
 691     // mostly concurrent full collection
 692     collect_mostly_concurrent(cause);
 693 #else  // INCLUDE_ALL_GCS
 694     ShouldNotReachHere();
 695 #endif // INCLUDE_ALL_GCS
 696   } else if (cause == GCCause::_wb_young_gc) {
 697     // minor collection for WhiteBox API
 698     collect(cause, 0);
 699   } else {
 700 #ifdef ASSERT
 701   if (cause == GCCause::_scavenge_alot) {
 702     // minor collection only
 703     collect(cause, 0);


 726 }
 727 
 728 // this is the private collection interface
 729 // The Heap_lock is expected to be held on entry.
 730 
 731 void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
 732   // Read the GC count while holding the Heap_lock
 733   unsigned int gc_count_before      = total_collections();
 734   unsigned int full_gc_count_before = total_full_collections();
 735   {
 736     MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
 737     VM_GenCollectFull op(gc_count_before, full_gc_count_before,
 738                          cause, max_level);
 739     VMThread::execute(&op);
 740   }
 741 }
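// The gc counts read above travel with the VM operation so that
// VM_GenCollectFull can detect that another thread's collection already
// satisfied this request and skip the redundant GC.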
 742 
 743 #if INCLUDE_ALL_GCS
 744 bool GenCollectedHeap::create_cms_collector() {
 745 
 746   assert(_old_gen->kind() == Generation::ConcurrentMarkSweep,
 747          "Unexpected generation kinds");
 748   // Skip two header words in the block content verification
 749   NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
 750   CMSCollector* collector = new CMSCollector(
 751     (ConcurrentMarkSweepGeneration*)_old_gen,
 752     _rem_set->as_CardTableRS(),
 753     (ConcurrentMarkSweepPolicy*) collector_policy());
 754 
 755   if (collector == NULL || !collector->completed_initialization()) {
 756     if (collector) {
 757       delete collector;  // Be nice in embedded situation
 758     }
 759     vm_shutdown_during_initialization("Could not create CMS collector");
 760     return false;
 761   }
 762   return true;  // success
 763 }
 764 
 765 void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
 766   assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");
 767 
 768   MutexLocker ml(Heap_lock);
 769   // Read the GC counts while holding the Heap_lock
 770   unsigned int full_gc_count_before = total_full_collections();
 771   unsigned int gc_count_before      = total_collections();


 798                 local_max_level      /* max_level */);
 799   // Hack XXX FIX ME !!!
 800   // A scavenge may not have been attempted, or may have
 801   // been attempted and failed, because the old gen was too full
 802   if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
 803       incremental_collection_will_fail(false /* don't consult_young */)) {
 804     if (PrintGCDetails) {
 805       gclog_or_tty->print_cr("GC locker: Trying a full collection "
 806                              "because scavenge failed");
 807     }
 808     // This time allow the old gen to be collected as well
 809     do_collection(true                 /* full */,
 810                   clear_all_soft_refs  /* clear_all_soft_refs */,
 811                   0                    /* size */,
 812                   false                /* is_tlab */,
 813                   n_gens() - 1         /* max_level */);
 814   }
 815 }
 816 
 817 bool GenCollectedHeap::is_in_young(oop p) {
 818   bool result = ((HeapWord*)p) < _old_gen->reserved().start();
 819   assert(result == _young_gen->is_in_reserved(p),
 820          err_msg("incorrect test - result=%d, p=" INTPTR_FORMAT, result, p2i((void*)p)));
 821   return result;
 822 }
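// This relies on the heap layout established at initialization: the young
// generation is reserved at the bottom of the heap, directly below the old
// generation, so a single pointer comparison suffices.  The assert checks
// the fast test against the authoritative is_in_reserved() in debug builds.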
 823 
 824 // Returns "TRUE" iff "p" points into the committed areas of the heap.
 825 bool GenCollectedHeap::is_in(const void* p) const {
 826   #ifndef ASSERT
 827   guarantee(VerifyBeforeGC      ||
 828             VerifyDuringGC      ||
 829             VerifyBeforeExit    ||
 830             VerifyDuringStartup ||
 831             PrintAssembly       ||
 832             tty->count() != 0   ||   // already printing
 833             VerifyAfterGC       ||
 834     VMError::fatal_error_in_progress(), "too expensive");
 835 
 836   #endif
 837   // This might be sped up with a cache of the last generation that
 838   // answered yes.
 839   if (_young_gen->is_in(p) || _old_gen->is_in(p)) {
 840     return true;
 841   }
 842   // Otherwise...
 843   return false;
 844 }
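// The guarantee above makes accidental use of is_in() on a performance-
// critical path fail loudly in product builds: a linear membership check
// of both generations is too expensive for anything except verification
// and error reporting.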
 845 
 846 #ifdef ASSERT
 847 // Don't implement this by using is_in_young().  This method is used
 848 // in some cases to check that is_in_young() is correct.
 849 bool GenCollectedHeap::is_in_partial_collection(const void* p) {
 850   assert(is_in_reserved(p) || p == NULL,
 851     "Does not work if address is non-null and outside of the heap");
 852   return p < _young_gen->reserved().end() && p != NULL;
 853 }
 854 #endif
 855 
 856 void GenCollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
 857   _young_gen->oop_iterate(cl);
 858   _old_gen->oop_iterate(cl);
 859 }
 860 
 861 void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
 862   _young_gen->object_iterate(cl);
 863   _old_gen->object_iterate(cl);
 864 }
 865 
 866 void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
 867   _young_gen->safe_object_iterate(cl);
 868   _old_gen->safe_object_iterate(cl);
 869 }
 870 
 871 Space* GenCollectedHeap::space_containing(const void* addr) const {
 872   Space* res = _young_gen->space_containing(addr);
 873   if (res != NULL) {
 874     return res;
 875   }
 876   res = _old_gen->space_containing(addr);
 877   assert(res != NULL, "Could not find containing space");
 878   return res;
 879 }
 880 
 881 HeapWord* GenCollectedHeap::block_start(const void* addr) const {
 882   assert(is_in_reserved(addr), "block_start of address outside of heap");
 883   if (_young_gen->is_in_reserved(addr)) {
 884     assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
 885     return _young_gen->block_start(addr);
 886   }
 887 
 888   assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
 889   assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
 890   return _old_gen->block_start(addr);
 891 }
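// block_start() maps an arbitrary address to the start of the block
// containing it, typically via the generation's block-offset table; card
// scanning and debugging code use it to find the object covering an
// address.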
 892 
 893 size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
 894   assert(is_in_reserved(addr), "block_size of address outside of heap");
 895   if (_young_gen->is_in_reserved(addr)) {
 896     assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
 897     return _young_gen->block_size(addr);
 898   }
 899 
 900   assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
 901   assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
 902   return _old_gen->block_size(addr);
 903 }
 904 
 905 bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
 906   assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
 907   assert(block_start(addr) == addr, "addr must be a block start");
 908   if (_young_gen->is_in_reserved(addr)) {
 909     return _young_gen->block_is_obj(addr);
 910   }
 911 
 912   assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
 913   return _old_gen->block_is_obj(addr);
 914 }
 915 
 916 bool GenCollectedHeap::supports_tlab_allocation() const {
 917   assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
 918   return _young_gen->supports_tlab_allocation();
 919 }
 920 
 921 size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
 922   assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
 923   if (_young_gen->supports_tlab_allocation()) {
 924     return _young_gen->tlab_capacity();
 925   }
 926   return 0;
 927 }
 928 
 929 size_t GenCollectedHeap::tlab_used(Thread* thr) const {
 930   assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
 931   if (_young_gen->supports_tlab_allocation()) {
 932     return _young_gen->tlab_used();
 933   }
 934   return 0;
 935 }
 936 
 937 size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
 938   assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
 939   if (_young_gen->supports_tlab_allocation()) {
 940     return _young_gen->unsafe_max_tlab_alloc();
 941   }
 942   return 0;
 943 }
 944 
 945 HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
 946   bool gc_overhead_limit_was_exceeded;
 947   return collector_policy()->mem_allocate_work(size /* size */,
 948                                                true /* is_tlab */,
 949                                                &gc_overhead_limit_was_exceeded);
 950 }
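// TLAB requests funnel through the same policy path as ordinary
// allocations, with is_tlab = true so the buffer is carved out of the
// young generation; the overhead-limit flag is required by the interface
// but not consulted here.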
 951 
 952 // Requires "*prev_ptr" to be non-NULL.  Deletes and returns a block of
 953 // minimal size from the list headed by "*prev_ptr".
 954 static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
 955   bool first = true;
 956   size_t min_size = 0;   // "first" makes this conceptually infinite.
 957   ScratchBlock **smallest_ptr, *smallest;
 958   ScratchBlock  *cur = *prev_ptr;
 959   while (cur) {
 960     assert(*prev_ptr == cur, "just checking");
 961     if (first || cur->num_words < min_size) {
 962       smallest_ptr = prev_ptr;


 971   *smallest_ptr = smallest->next;
 972   return smallest;
 973 }
 974 
 975 // Sort the scratch block list headed by "list" into decreasing size order,
 976 // and set "list" to the result.
 977 static void sort_scratch_list(ScratchBlock*& list) {
 978   ScratchBlock* sorted = NULL;
 979   ScratchBlock* unsorted = list;
 980   while (unsorted) {
 981     ScratchBlock *smallest = removeSmallestScratch(&unsorted);
 982     smallest->next  = sorted;
 983     sorted          = smallest;
 984   }
 985   list = sorted;
 986 }
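// Repeatedly unlinking the smallest remaining block and pushing it onto
// the front of "sorted" is a selection sort, O(n^2) in the number of
// blocks, which is acceptable because scratch lists are short.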
 987 
 988 ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
 989                                                size_t max_alloc_words) {
 990   ScratchBlock* res = NULL;
 991   _young_gen->contribute_scratch(res, requestor, max_alloc_words);
 992   _old_gen->contribute_scratch(res, requestor, max_alloc_words);
 993   sort_scratch_list(res);
 994   return res;
 995 }
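// Because the list is sorted largest-first, the requesting generation can
// satisfy its temporary-space needs from the biggest contributed blocks
// before resorting to smaller fragments.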
 996 
 997 void GenCollectedHeap::release_scratch() {
 998   _young_gen->reset_scratch();
 999   _old_gen->reset_scratch();
1000 }
1001 
1002 class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
1003   void do_generation(Generation* gen) {
1004     gen->prepare_for_verify();
1005   }
1006 };
1007 
1008 void GenCollectedHeap::prepare_for_verify() {
1009   ensure_parsability(false);        // no need to retire TLABs
1010   GenPrepareForVerifyClosure blk;
1011   generation_iterate(&blk, false);
1012 }
1013 
1014 void GenCollectedHeap::generation_iterate(GenClosure* cl,
1015                                           bool old_to_young) {
1016   if (old_to_young) {
1017     cl->do_generation(_old_gen);
1018     cl->do_generation(_young_gen);
1019   } else {
1020     cl->do_generation(_young_gen);
1021     cl->do_generation(_old_gen);
1022   }
1023 }
1024 
1025 void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
1026   _young_gen->space_iterate(cl, true);
1027   _old_gen->space_iterate(cl, true);
1028 }
1029 
1030 bool GenCollectedHeap::is_maximal_no_gc() const {
1031   return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
1032 }
1033 
1034 void GenCollectedHeap::save_marks() {
1035   _young_gen->save_marks();
1036   _old_gen->save_marks();
1037 }
1038 
1039 GenCollectedHeap* GenCollectedHeap::heap() {
1040   assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
1041   assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
1042   return _gch;
1043 }
1044 
1045 
1046 void GenCollectedHeap::prepare_for_compaction() {
1047   guarantee(_n_gens == 2, "Wrong number of generations");
1048   Generation* old_gen = _old_gen;
1049   // Start by compacting into same gen.
1050   CompactPoint cp(old_gen);
1051   old_gen->prepare_for_compaction(&cp);
1052   Generation* young_gen = _young_gen;
1053   young_gen->prepare_for_compaction(&cp);
1054 }
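// A single CompactPoint is threaded through both generations: the old
// generation first compacts into itself, and surviving young objects are
// then compacted starting at wherever the compaction point ends up, which
// may place them in the old generation.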
1055 
1056 GCStats* GenCollectedHeap::gc_stats(int level) const {
1057   if (level == 0) {
1058     return _young_gen->gc_stats();
1059   } else {
1060     return _old_gen->gc_stats();
1061   }
1062 }
1063 
1064 void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
1065   if (!silent) {
1066     gclog_or_tty->print("%s", _old_gen->name());
1067     gclog_or_tty->print(" ");
1068   }
1069   _old_gen->verify();
1070 
1071   if (!silent) {
1072     gclog_or_tty->print("%s", _young_gen->name());
1073     gclog_or_tty->print(" ");
1074   }
1075   _young_gen->verify();
1076 
1077   if (!silent) {
1078     gclog_or_tty->print("remset ");
1079   }
1080   rem_set()->verify();
1081 }
1082 
1083 void GenCollectedHeap::print_on(outputStream* st) const {
1084   _young_gen->print_on(st);
1085   _old_gen->print_on(st);
1086   MetaspaceAux::print_on(st);
1087 }
1088 
1089 void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
1090   if (workers() != NULL) {
1091     workers()->threads_do(tc);
1092   }
1093 #if INCLUDE_ALL_GCS
1094   if (UseConcMarkSweepGC) {
1095     ConcurrentMarkSweepThread::threads_do(tc);
1096   }
1097 #endif // INCLUDE_ALL_GCS
1098 }
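// The CMS background thread is not part of the workers() gang, so it is
// enumerated separately above.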
1099 
1100 void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
1101 #if INCLUDE_ALL_GCS
1102   if (UseConcMarkSweepGC) {
1103     workers()->print_worker_threads_on(st);
1104     ConcurrentMarkSweepThread::print_all_on(st);
1105   }
1106 #endif // INCLUDE_ALL_GCS
1107 }