src/share/vm/gc/shared/genCollectedHeap.cpp

 110 
 111   char* heap_address;
 112   ReservedSpace heap_rs;
 113 
 114   size_t heap_alignment = collector_policy()->heap_alignment();
 115 
 116   heap_address = allocate(heap_alignment, &heap_rs);
 117 
 118   if (!heap_rs.is_reserved()) {
 119     vm_shutdown_during_initialization(
 120       "Could not reserve enough space for object heap");
 121     return JNI_ENOMEM;
 122   }
 123 
 124   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
 125 
 126   _rem_set = collector_policy()->create_rem_set(reserved_region());
 127   set_barrier_set(rem_set()->bs());
 128 
 129   ReservedSpace young_rs = heap_rs.first_part(gen_policy()->young_gen_spec()->max_size(), false, false);
 130   _young_gen = gen_policy()->young_gen_spec()->init(young_rs, 0, rem_set());
 131   heap_rs = heap_rs.last_part(gen_policy()->young_gen_spec()->max_size());
 132 
 133   ReservedSpace old_rs = heap_rs.first_part(gen_policy()->old_gen_spec()->max_size(), false, false);
 134   _old_gen = gen_policy()->old_gen_spec()->init(old_rs, 1, rem_set());
 135   clear_incremental_collection_failed();
 136 
 137 #if INCLUDE_ALL_GCS
 138   // If we are running CMS, create the collector responsible
 139   // for collecting the CMS generations.
 140   if (collector_policy()->is_concurrent_mark_sweep_policy()) {
 141     bool success = create_cms_collector();
 142     if (!success) return JNI_ENOMEM;
 143   }
 144 #endif // INCLUDE_ALL_GCS
 145 
 146   return JNI_OK;
 147 }
 148 
 149 char* GenCollectedHeap::allocate(size_t alignment,
 150                                  ReservedSpace* heap_rs) {
 151   // Now figure out the total size.
 152   const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
 153   assert(alignment % pageSize == 0, "Must be");
 154 


 185 
 186   policy->initialize_size_policy(def_new_gen->eden()->capacity(),
 187                                  _old_gen->capacity(),
 188                                  def_new_gen->from()->capacity());
 189   policy->initialize_gc_policy_counters();
 190 }
 191 
 192 void GenCollectedHeap::ref_processing_init() {
 193   _young_gen->ref_processor_init();
 194   _old_gen->ref_processor_init();
 195 }
 196 
 197 size_t GenCollectedHeap::capacity() const {
 198   return _young_gen->capacity() + _old_gen->capacity();
 199 }
 200 
 201 size_t GenCollectedHeap::used() const {
 202   return _young_gen->used() + _old_gen->used();
 203 }
 204 
 205 // Save the "used_region" for the given generation level and all lower (younger) levels.
 206 void GenCollectedHeap::save_used_regions(int level) {
 207   assert(level == 0 || level == 1, "Illegal level parameter");
 208   if (level == 1) {
 209     _old_gen->save_used_region();
 210   }
 211   _young_gen->save_used_region();
 212 }
 213 
 214 size_t GenCollectedHeap::max_capacity() const {
 215   return _young_gen->max_capacity() + _old_gen->max_capacity();
 216 }
 217 
 218 // Update the _full_collections_completed counter
 219 // at the end of a stop-the-world full GC.
 220 unsigned int GenCollectedHeap::update_full_collections_completed() {
 221   MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
 222   assert(_full_collections_completed <= _total_full_collections,
 223          "Can't complete more collections than were started");
 224   _full_collections_completed = _total_full_collections;
 225   ml.notify_all();
 226   return _full_collections_completed;
 227 }
 228 
 229 // Update the _full_collections_completed counter, as appropriate,
 230 // at the end of a concurrent GC cycle. Note the conditional update


 313                                           bool is_tlab, bool run_verification, bool clear_soft_refs,
 314                                           bool restore_marks_for_biased_locking) {
 315   // Timer for individual generations. Last argument is false: no CR
 316   // FIXME: We should try to start the timing earlier to cover more of the GC pause
 317   // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
 318   // so we can assume here that the next GC id is what we want.
 319   GCTraceTime t1(gen->short_name(), PrintGCDetails, false, NULL, GCId::peek());
 320   TraceCollectorStats tcs(gen->counters());
 321   TraceMemoryManagerStats tmms(gen->kind(), gc_cause());
 322 
 323   size_t prev_used = gen->used();
 324   gen->stat_record()->invocations++;
 325   gen->stat_record()->accumulated_time.start();
 326 
 327   // Must be done anew before each collection because
 328   // a previous collection will do mangling and will
 329   // change top of some spaces.
 330   record_gen_tops_before_GC();
 331 
 332   if (PrintGC && Verbose) {
 333     gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
 334                         gen->level(),
 335                         gen->stat_record()->invocations,
 336                         size * HeapWordSize);
 337   }
 338 
 339   if (run_verification && VerifyBeforeGC) {
 340     HandleMark hm;  // Discard invalid handles created during verification
 341     Universe::verify(" VerifyBeforeGC:");
 342   }
 343   COMPILER2_PRESENT(DerivedPointerTable::clear());
 344 
 345   if (restore_marks_for_biased_locking) {
 346     // We perform this mark word preservation work lazily
 347     // because it's only at this point that we know whether we
 348     // absolutely have to do it; we want to avoid doing it for
 349     // scavenge-only collections where it's unnecessary
 350     BiasedLocking::preserve_marks();
 351   }
 352 
 353   // Do collection work
 354   {


 375     // are guaranteed to have empty discovered ref lists.
 376     if (rp->discovery_is_atomic()) {
 377       rp->enable_discovery();
 378       rp->setup_policy(clear_soft_refs);
 379     } else {
 380       // collect() below will enable discovery as appropriate
 381     }
 382     gen->collect(full, clear_soft_refs, size, is_tlab);
 383     if (!rp->enqueuing_is_done()) {
 384       rp->enqueue_discovered_references();
 385     } else {
 386       rp->set_enqueuing_is_done(false);
 387     }
 388     rp->verify_no_references_recorded();
 389   }
 390 
 391   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 392 
 393   gen->stat_record()->accumulated_time.stop();
 394 
 395   update_gc_stats(gen->level(), full);
 396 
 397   if (run_verification && VerifyAfterGC) {
 398     HandleMark hm;  // Discard invalid handles created during verification
 399     Universe::verify(" VerifyAfterGC:");
 400   }
 401 
 402   if (PrintGCDetails) {
 403     gclog_or_tty->print(":");
 404     gen->print_heap_change(prev_used);
 405   }
 406 }
 407 
 408 void GenCollectedHeap::do_collection(bool   full,
 409                                      bool   clear_all_soft_refs,
 410                                      size_t size,
 411                                      bool   is_tlab,
 412                                      int    max_level) {
 413   ResourceMark rm;
 414   DEBUG_ONLY(Thread* my_thread = Thread::current();)
 415 
 416   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
 417   assert(my_thread->is_VM_thread() ||
 418          my_thread->is_ConcurrentGC_thread(),
 419          "incorrect thread type capability");
 420   assert(Heap_lock->is_locked(),
 421          "the requesting thread should have the Heap_lock");
 422   guarantee(!is_gc_active(), "collection is not reentrant");
 423 
 424   if (GC_locker::check_active_before_gc()) {
 425     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
 426   }
 427 
 428   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
 429                           collector_policy()->should_clear_all_soft_refs();
 430 
 431   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
 432 
 433   const size_t metadata_prev_used = MetaspaceAux::used_bytes();
 434 
 435   print_heap_before_gc();
 436 
 437   {
 438     FlagSetting fl(_is_gc_active, true);
 439 
 440     bool complete = full && (max_level == 1 /* old */);
 441     const char* gc_cause_prefix = complete ? "Full GC" : "GC";
 442     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
 443     // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
 444     // so we can assume here that the next GC id is what we want.
 445     GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek());
 446 
 447     gc_prologue(complete);
 448     increment_total_collections(complete);
 449 
 450     size_t gch_prev_used = used();
 451     bool run_verification = total_collections() >= VerifyGCStartAt;
 452 
 453     bool prepared_for_verification = false;
 454     int max_level_collected = 0;
 455     bool old_collects_young = (max_level == 1) &&
 456                               full &&
 457                               _old_gen->full_collects_younger_generations();
 458     if (!old_collects_young &&
 459         _young_gen->should_collect(full, size, is_tlab)) {
 460       if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
 461         prepare_for_verify();
 462         prepared_for_verification = true;
 463       }
 464 
 465       assert(!_young_gen->performs_in_place_marking(), "No young generation does in place marking");
 466       collect_generation(_young_gen,
 467                          full,
 468                          size,
 469                          is_tlab,
 470                          run_verification && VerifyGCLevel <= 0,
 471                          do_clear_all_soft_refs,
 472                          false);
 473 
 474       if (size > 0 && (!is_tlab || _young_gen->supports_tlab_allocation()) &&
 475           size * HeapWordSize <= _young_gen->unsafe_max_alloc_nogc()) {
 476         // Allocation request was met by young GC.
 477         size = 0;
 478       }
 479     }
 480 
 481     bool must_restore_marks_for_biased_locking = false;
 482 
 483     if (max_level == 1 && _old_gen->should_collect(full, size, is_tlab)) {
 484       if (!complete) {
 485         // The full_collections increment was missed above.
 486         increment_total_full_collections();
 487       }
 488 
 489       pre_full_gc_dump(NULL);    // do any pre full gc dumps
 490 
 491       if (!prepared_for_verification && run_verification &&
 492           VerifyGCLevel <= 1 && VerifyBeforeGC) {
 493         prepare_for_verify();
 494       }
 495 
 496       assert(_old_gen->performs_in_place_marking(), "All old generations do in place marking");
 497       collect_generation(_old_gen,
 498                          full,
 499                          size,
 500                          is_tlab,
 501                          run_verification && VerifyGCLevel <= 1,
 502                          do_clear_all_soft_refs,
 503                          true);
 504 
 505       must_restore_marks_for_biased_locking = true;
 506       max_level_collected = 1;
 507     }
 508 
 509     // Update "complete" boolean wrt what actually transpired --
 510     // for instance, a promotion failure could have led to
 511     // a whole heap collection.
 512     complete = complete || (max_level_collected == 1 /* old */);
 513 
 514     if (complete) { // We did a "major" collection
 515       // FIXME: See comment at pre_full_gc_dump call
 516       post_full_gc_dump(NULL);   // do any post full gc dumps
 517     }
 518 
 519     if (PrintGCDetails) {
 520       print_heap_change(gch_prev_used);
 521 
 522       // Print metaspace info for full GC with PrintGCDetails flag.
 523       if (complete) {
 524         MetaspaceAux::print_metaspace_change(metadata_prev_used);
 525       }
 526     }
 527 
 528     // Adjust generation sizes.
 529     if (max_level_collected == 1 /* old */) {
 530       _old_gen->compute_new_size();
 531     }
 532     _young_gen->compute_new_size();
 533 
 534     if (complete) {
 535       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
 536       ClassLoaderDataGraph::purge();
 537       MetaspaceAux::verify_metrics();
 538       // Resize the metaspace capacity after full collections
 539       MetaspaceGC::compute_new_size();
 540       update_full_collections_completed();
 541     }
 542 
 543     // Track memory usage and detect low memory after GC finishes
 544     MemoryService::track_memory_usage();
 545 
 546     gc_epilogue(complete);
 547 
 548     if (must_restore_marks_for_biased_locking) {
 549       BiasedLocking::restore_marks();


 637 
 638   if (!_process_strong_tasks->is_task_claimed(GCH_PS_CodeCache_oops_do)) {
 639     if (so & SO_ScavengeCodeCache) {
 640       assert(code_roots != NULL, "must supply closure for code cache");
 641 
 642       // We only visit parts of the CodeCache when scavenging.
 643       CodeCache::scavenge_root_nmethods_do(code_roots);
 644     }
 645     if (so & SO_AllCodeCache) {
 646       assert(code_roots != NULL, "must supply closure for code cache");
 647 
 648       // CMSCollector uses this to do intermediate-strength collections.
 649       // We scan the entire code cache, since CodeCache::do_unloading is not called.
 650       CodeCache::blobs_do(code_roots);
 651     }
 652     // Verify that the code cache contents are not subject to
 653     // movement by a scavenging collection.
 654     DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
 655     DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
 656   }
 657 
 658 }
 659 
 660 void GenCollectedHeap::gen_process_roots(StrongRootsScope* scope,
 661                                          int level,
 662                                          bool younger_gens_as_roots,
 663                                          ScanningOption so,
 664                                          bool only_strong_roots,
 665                                          OopsInGenClosure* not_older_gens,
 666                                          OopsInGenClosure* older_gens,
 667                                          CLDClosure* cld_closure) {
 668   const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots;
 669 
 670   bool is_moving_collection = false;
 671   if (level == 0 || is_adjust_phase) {
 672     // young collections are always moving
 673     is_moving_collection = true;
 674   }
 675 
 676   MarkingCodeBlobClosure mark_code_closure(not_older_gens, is_moving_collection);
 677   OopsInGenClosure* weak_roots = only_strong_roots ? NULL : not_older_gens;
 678   CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;
 679 
 680   process_roots(scope, so,
 681                 not_older_gens, weak_roots,
 682                 cld_closure, weak_cld_closure,
 683                 &mark_code_closure);
 684 
 685   if (younger_gens_as_roots) {
 686     if (!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
 687       if (level == 1) {
 688         not_older_gens->set_generation(_young_gen);
 689         _young_gen->oop_iterate(not_older_gens);
 690       }
 691       not_older_gens->reset_generation();
 692     }
 693   }
 694   // When collection is parallel, all threads get to cooperate to do
 695   // older-gen scanning.
 696   if (level == 0) {
 697     older_gens->set_generation(_old_gen);
 698     rem_set()->younger_refs_iterate(_old_gen, older_gens, scope->n_threads());
 699     older_gens->reset_generation();
 700   }
 701 
 702   _process_strong_tasks->all_tasks_completed(scope->n_threads());
 703 }
 704 
 705 
 706 class AlwaysTrueClosure: public BoolObjectClosure {
 707 public:
 708   bool do_object_b(oop p) { return true; }
 709 };
 710 static AlwaysTrueClosure always_true;
 711 
 712 void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
 713   JNIHandles::weak_oops_do(&always_true, root_closure);
 714   _young_gen->ref_processor()->weak_oops_do(root_closure);
 715   _old_gen->ref_processor()->weak_oops_do(root_closure);
 716 }
 717 
 718 #define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
 719 void GenCollectedHeap::                                                 \
 720 oop_since_save_marks_iterate(int level,                                 \
 721                              OopClosureType* cur,                       \
 722                              OopClosureType* older) {                   \
 723   if (level == 0) {                                                     \
 724     _young_gen->oop_since_save_marks_iterate##nv_suffix(cur);           \
 725     _old_gen->oop_since_save_marks_iterate##nv_suffix(older);           \
 726   } else {                                                              \
 727     _old_gen->oop_since_save_marks_iterate##nv_suffix(cur);             \
 728   }                                                                     \
 729 }
 730 
 731 ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)
 732 
 733 #undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN
 734 
 735 bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
 736   if (level == 0 && !_young_gen->no_allocs_since_save_marks()) {
 737     return false;
 738   }
 739   return _old_gen->no_allocs_since_save_marks();
 740 }
 741 
 742 bool GenCollectedHeap::supports_inline_contig_alloc() const {
 743   return _young_gen->supports_inline_contig_alloc();
 744 }
 745 
 746 HeapWord** GenCollectedHeap::top_addr() const {
 747   return _young_gen->top_addr();
 748 }
 749 
 750 HeapWord** GenCollectedHeap::end_addr() const {
 751   return _young_gen->end_addr();
 752 }
 753 
 754 // public collection interfaces
 755 
 756 void GenCollectedHeap::collect(GCCause::Cause cause) {
 757   if (should_do_concurrent_full_gc(cause)) {
 758 #if INCLUDE_ALL_GCS
 759     // mostly concurrent full collection
 760     collect_mostly_concurrent(cause);
 761 #else  // INCLUDE_ALL_GCS
 762     ShouldNotReachHere();
 763 #endif // INCLUDE_ALL_GCS
 764   } else if (cause == GCCause::_wb_young_gc) {
 765     // minor collection for WhiteBox API
 766     collect(cause, 0 /* young */);
 767   } else {
 768 #ifdef ASSERT
 769   if (cause == GCCause::_scavenge_alot) {
 770     // minor collection only
 771     collect(cause, 0 /* young */);
 772   } else {
 773     // Stop-the-world full collection
 774     collect(cause, 1 /* old */);
 775   }
 776 #else
 777     // Stop-the-world full collection
 778     collect(cause, 1 /* old */);
 779 #endif
 780   }
 781 }
 782 
 783 void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
 784   // The caller doesn't have the Heap_lock
 785   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
 786   MutexLocker ml(Heap_lock);
 787   collect_locked(cause, max_level);
 788 }
 789 
 790 void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
 791   // The caller has the Heap_lock
 792   assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
 793   collect_locked(cause, 1 /* old */);
 794 }
 795 
 796 // this is the private collection interface
 797 // The Heap_lock is expected to be held on entry.
 798 
 799 void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
 800   // Read the GC count while holding the Heap_lock
 801   unsigned int gc_count_before      = total_collections();
 802   unsigned int full_gc_count_before = total_full_collections();
 803   {
 804     MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
 805     VM_GenCollectFull op(gc_count_before, full_gc_count_before,
 806                          cause, max_level);
 807     VMThread::execute(&op);
 808   }
 809 }
 810 
 811 #if INCLUDE_ALL_GCS
 812 bool GenCollectedHeap::create_cms_collector() {
 813 
 814   assert(_old_gen->kind() == Generation::ConcurrentMarkSweep,
 815          "Unexpected generation kind");
 816   // Skip two header words in the block content verification
 817   NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
 818   CMSCollector* collector = new CMSCollector(
 819     (ConcurrentMarkSweepGeneration*)_old_gen,
 820     _rem_set->as_CardTableRS(),
 821     (ConcurrentMarkSweepPolicy*) collector_policy());
 822 
 823   if (collector == NULL || !collector->completed_initialization()) {
 824     if (collector) {
 825       delete collector;  // Be nice in embedded situation
 826     }


 829   }
 830   return true;  // success
 831 }
 832 
 833 void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
 834   assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");
 835 
 836   MutexLocker ml(Heap_lock);
 837   // Read the GC counts while holding the Heap_lock
 838   unsigned int full_gc_count_before = total_full_collections();
 839   unsigned int gc_count_before      = total_collections();
 840   {
 841     MutexUnlocker mu(Heap_lock);
 842     VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
 843     VMThread::execute(&op);
 844   }
 845 }
 846 #endif // INCLUDE_ALL_GCS
 847 
 848 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
 849    do_full_collection(clear_all_soft_refs, 1 /* old */);
 850 }
 851 
 852 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
 853                                           int max_level) {
 854   int local_max_level;
 855   if (!incremental_collection_will_fail(false /* don't consult_young */) &&
 856       gc_cause() == GCCause::_gc_locker) {
 857     local_max_level = 0;
 858   } else {
 859     local_max_level = max_level;
 860   }
 861 
 862   do_collection(true                 /* full */,
 863                 clear_all_soft_refs  /* clear_all_soft_refs */,
 864                 0                    /* size */,
 865                 false                /* is_tlab */,
 866                 local_max_level      /* max_level */);
 867   // Hack XXX FIX ME !!!
 868   // A scavenge may not have been attempted, or may have
 869   // been attempted and failed, because the old gen was too full
 870   if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
 871       incremental_collection_will_fail(false /* don't consult_young */)) {
 872     if (PrintGCDetails) {
 873       gclog_or_tty->print_cr("GC locker: Trying a full collection "
 874                              "because scavenge failed");
 875     }
 876     // This time allow the old gen to be collected as well
 877     do_collection(true                 /* full */,
 878                   clear_all_soft_refs  /* clear_all_soft_refs */,
 879                   0                    /* size */,
 880                   false                /* is_tlab */,
 881                   1  /* old */         /* max_level */);
 882   }
 883 }
 884 
 885 bool GenCollectedHeap::is_in_young(oop p) {
 886   bool result = ((HeapWord*)p) < _old_gen->reserved().start();
 887   assert(result == _young_gen->is_in_reserved(p),
 888          err_msg("incorrect test - result=%d, p=" INTPTR_FORMAT, result, p2i((void*)p)));
 889   return result;
 890 }
 891 
 892 // Returns "TRUE" iff "p" points into the committed areas of the heap.
 893 bool GenCollectedHeap::is_in(const void* p) const {
 894   return _young_gen->is_in(p) || _old_gen->is_in(p);
 895 }
 896 
 897 #ifdef ASSERT
 898 // Don't implement this by using is_in_young().  This method is used
 899 // in some cases to check that is_in_young() is correct.
 900 bool GenCollectedHeap::is_in_partial_collection(const void* p) {
 901   assert(is_in_reserved(p) || p == NULL,


1084 
1085 void GenCollectedHeap::save_marks() {
1086   _young_gen->save_marks();
1087   _old_gen->save_marks();
1088 }
1089 
1090 GenCollectedHeap* GenCollectedHeap::heap() {
1091   CollectedHeap* heap = Universe::heap();
1092   assert(heap != NULL, "Uninitialized access to GenCollectedHeap::heap()");
1093   assert(heap->kind() == CollectedHeap::GenCollectedHeap, "Not a GenCollectedHeap");
1094   return (GenCollectedHeap*)heap;
1095 }
1096 
1097 void GenCollectedHeap::prepare_for_compaction() {
1098   // Start by compacting into same gen.
1099   CompactPoint cp(_old_gen);
1100   _old_gen->prepare_for_compaction(&cp);
1101   _young_gen->prepare_for_compaction(&cp);
1102 }
1103 
1104 GCStats* GenCollectedHeap::gc_stats(int level) const {
1105   if (level == 0) {
1106     return _young_gen->gc_stats();
1107   } else {
1108     return _old_gen->gc_stats();
1109   }
1110 }
1111 
1112 void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
1113   if (!silent) {
1114     gclog_or_tty->print("%s", _old_gen->name());
1115     gclog_or_tty->print(" ");
1116   }
1117   _old_gen->verify();
1118 
1119   if (!silent) {
1120     gclog_or_tty->print("%s", _young_gen->name());
1121     gclog_or_tty->print(" ");
1122   }
1123   _young_gen->verify();
1124 
1125   if (!silent) {
1126     gclog_or_tty->print("remset ");
1127   }
1128   rem_set()->verify();
1129 }


1259   }
1260 }
1261 #endif  // not PRODUCT
1262 
1263 class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
1264  public:
1265   void do_generation(Generation* gen) {
1266     gen->ensure_parsability();
1267   }
1268 };
1269 
1270 void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
1271   CollectedHeap::ensure_parsability(retire_tlabs);
1272   GenEnsureParsabilityClosure ep_cl;
1273   generation_iterate(&ep_cl, false);
1274 }
1275 
1276 oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
1277                                               oop obj,
1278                                               size_t obj_size) {
1279   guarantee(old_gen->level() == 1, "We only get here with an old generation");
1280   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1281   HeapWord* result = NULL;
1282 
1283   result = old_gen->expand_and_allocate(obj_size, false);
1284 
1285   if (result != NULL) {
1286     Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
1287   }
1288   return oop(result);
1289 }
1290 
1291 class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
1292   jlong _time;   // in ms
1293   jlong _now;    // in ms
1294 
1295  public:
1296   GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }
1297 
1298   jlong time() { return _time; }
1299 




 110 
 111   char* heap_address;
 112   ReservedSpace heap_rs;
 113 
 114   size_t heap_alignment = collector_policy()->heap_alignment();
 115 
 116   heap_address = allocate(heap_alignment, &heap_rs);
 117 
 118   if (!heap_rs.is_reserved()) {
 119     vm_shutdown_during_initialization(
 120       "Could not reserve enough space for object heap");
 121     return JNI_ENOMEM;
 122   }
 123 
 124   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
 125 
 126   _rem_set = collector_policy()->create_rem_set(reserved_region());
 127   set_barrier_set(rem_set()->bs());
 128 
 129   ReservedSpace young_rs = heap_rs.first_part(gen_policy()->young_gen_spec()->max_size(), false, false);
 130   _young_gen = gen_policy()->young_gen_spec()->init(young_rs, rem_set());
 131   heap_rs = heap_rs.last_part(gen_policy()->young_gen_spec()->max_size());
 132 
 133   ReservedSpace old_rs = heap_rs.first_part(gen_policy()->old_gen_spec()->max_size(), false, false);
 134   _old_gen = gen_policy()->old_gen_spec()->init(old_rs, rem_set());
 135   clear_incremental_collection_failed();
 136 
 137 #if INCLUDE_ALL_GCS
 138   // If we are running CMS, create the collector responsible
 139   // for collecting the CMS generations.
 140   if (collector_policy()->is_concurrent_mark_sweep_policy()) {
 141     bool success = create_cms_collector();
 142     if (!success) return JNI_ENOMEM;
 143   }
 144 #endif // INCLUDE_ALL_GCS
 145 
 146   return JNI_OK;
 147 }
 148 
 149 char* GenCollectedHeap::allocate(size_t alignment,
 150                                  ReservedSpace* heap_rs) {
 151   // Now figure out the total size.
 152   const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
 153   assert(alignment % pageSize == 0, "Must be");
 154 


 185 
 186   policy->initialize_size_policy(def_new_gen->eden()->capacity(),
 187                                  _old_gen->capacity(),
 188                                  def_new_gen->from()->capacity());
 189   policy->initialize_gc_policy_counters();
 190 }
 191 
 192 void GenCollectedHeap::ref_processing_init() {
 193   _young_gen->ref_processor_init();
 194   _old_gen->ref_processor_init();
 195 }
 196 
 197 size_t GenCollectedHeap::capacity() const {
 198   return _young_gen->capacity() + _old_gen->capacity();
 199 }
 200 
 201 size_t GenCollectedHeap::used() const {
 202   return _young_gen->used() + _old_gen->used();
 203 }
 204 
 205 void GenCollectedHeap::save_used_regions() {
 206   _old_gen->save_used_region();
 207   _young_gen->save_used_region();
 208 }
 209 
 210 size_t GenCollectedHeap::max_capacity() const {
 211   return _young_gen->max_capacity() + _old_gen->max_capacity();
 212 }
 213 
 214 // Update the _full_collections_completed counter
 215 // at the end of a stop-the-world full GC.
 216 unsigned int GenCollectedHeap::update_full_collections_completed() {
 217   MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
 218   assert(_full_collections_completed <= _total_full_collections,
 219          "Can't complete more collections than were started");
 220   _full_collections_completed = _total_full_collections;
 221   ml.notify_all();
 222   return _full_collections_completed;
 223 }
 224 
 225 // Update the _full_collections_completed counter, as appropriate,
 226 // at the end of a concurrent GC cycle. Note the conditional update


 309                                           bool is_tlab, bool run_verification, bool clear_soft_refs,
 310                                           bool restore_marks_for_biased_locking) {
 311   // Timer for individual generations. Last argument is false: no CR
 312   // FIXME: We should try to start the timing earlier to cover more of the GC pause
 313   // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
 314   // so we can assume here that the next GC id is what we want.
 315   GCTraceTime t1(gen->short_name(), PrintGCDetails, false, NULL, GCId::peek());
 316   TraceCollectorStats tcs(gen->counters());
 317   TraceMemoryManagerStats tmms(gen->kind(), gc_cause());
 318 
 319   size_t prev_used = gen->used();
 320   gen->stat_record()->invocations++;
 321   gen->stat_record()->accumulated_time.start();
 322 
 323   // Must be done anew before each collection because
 324   // a previous collection will do mangling and will
 325   // change top of some spaces.
 326   record_gen_tops_before_GC();
 327 
 328   if (PrintGC && Verbose) {
 329     // The logging is deliberately left unchanged by the removal of the level concept,
 330     // but it could print young/old here instead of the numeric 0/1.
 331     uint level;
 332     if (gen == GenCollectedHeap::heap()->young_gen()) {
 333       level = 0;
 334     } else {
 335       level = 1;
 336     }
 337     gclog_or_tty->print("level=%u invoke=%d size=" SIZE_FORMAT,
 338                         level,
 339                         gen->stat_record()->invocations,
 340                         size * HeapWordSize);
 341   }
 342 
 343   if (run_verification && VerifyBeforeGC) {
 344     HandleMark hm;  // Discard invalid handles created during verification
 345     Universe::verify(" VerifyBeforeGC:");
 346   }
 347   COMPILER2_PRESENT(DerivedPointerTable::clear());
 348 
 349   if (restore_marks_for_biased_locking) {
 350     // We perform this mark word preservation work lazily
 351     // because it's only at this point that we know whether we
 352     // absolutely have to do it; we want to avoid doing it for
 353     // scavenge-only collections where it's unnecessary
 354     BiasedLocking::preserve_marks();
 355   }
 356 
 357   // Do collection work
 358   {


 379     // are guaranteed to have empty discovered ref lists.
 380     if (rp->discovery_is_atomic()) {
 381       rp->enable_discovery();
 382       rp->setup_policy(clear_soft_refs);
 383     } else {
 384       // collect() below will enable discovery as appropriate
 385     }
 386     gen->collect(full, clear_soft_refs, size, is_tlab);
 387     if (!rp->enqueuing_is_done()) {
 388       rp->enqueue_discovered_references();
 389     } else {
 390       rp->set_enqueuing_is_done(false);
 391     }
 392     rp->verify_no_references_recorded();
 393   }
 394 
 395   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 396 
 397   gen->stat_record()->accumulated_time.stop();
 398 
 399   update_gc_stats(gen, full);
 400 
 401   if (run_verification && VerifyAfterGC) {
 402     HandleMark hm;  // Discard invalid handles created during verification
 403     Universe::verify(" VerifyAfterGC:");
 404   }
 405 
 406   if (PrintGCDetails) {
 407     gclog_or_tty->print(":");
 408     gen->print_heap_change(prev_used);
 409   }
 410 }
 411 
 412 void GenCollectedHeap::do_collection(bool             full,
 413                                      bool             clear_all_soft_refs,
 414                                      size_t           size,
 415                                      bool             is_tlab,
 416                                      Generation::Type max_generation) {
 417   ResourceMark rm;
 418   DEBUG_ONLY(Thread* my_thread = Thread::current();)
 419 
 420   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
 421   assert(my_thread->is_VM_thread() ||
 422          my_thread->is_ConcurrentGC_thread(),
 423          "incorrect thread type capability");
 424   assert(Heap_lock->is_locked(),
 425          "the requesting thread should have the Heap_lock");
 426   guarantee(!is_gc_active(), "collection is not reentrant");
 427 
 428   if (GC_locker::check_active_before_gc()) {
 429     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
 430   }
 431 
 432   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
 433                           collector_policy()->should_clear_all_soft_refs();
 434 
 435   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
 436 
 437   const size_t metadata_prev_used = MetaspaceAux::used_bytes();
 438 
 439   print_heap_before_gc();
 440 
 441   {
 442     FlagSetting fl(_is_gc_active, true);
 443 
 444     bool complete = full && (max_generation == Generation::Old);
 445     const char* gc_cause_prefix = complete ? "Full GC" : "GC";
 446     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
 447     // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
 448     // so we can assume here that the next GC id is what we want.
 449     GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek());
 450 
 451     gc_prologue(complete);
 452     increment_total_collections(complete);
 453 
 454     size_t gch_prev_used = used();
 455     bool run_verification = total_collections() >= VerifyGCStartAt;
 456 
 457     bool prepared_for_verification = false;
 458     bool collected_old = false;
 459     bool old_collects_young = complete &&
 460                               _old_gen->full_collects_younger_generations();
 461     if (!old_collects_young &&
 462         _young_gen->should_collect(full, size, is_tlab)) {
 463       if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
 464         prepare_for_verify();
 465         prepared_for_verification = true;
 466       }
 467 
 468       assert(!_young_gen->performs_in_place_marking(), "No young generation does in place marking");
 469       collect_generation(_young_gen,
 470                          full,
 471                          size,
 472                          is_tlab,
 473                          run_verification && VerifyGCLevel <= 0,
 474                          do_clear_all_soft_refs,
 475                          false);
 476 
 477       if (size > 0 && (!is_tlab || _young_gen->supports_tlab_allocation()) &&
 478           size * HeapWordSize <= _young_gen->unsafe_max_alloc_nogc()) {
 479         // Allocation request was met by young GC.
 480         size = 0;
 481       }
 482     }
 483 
 484     bool must_restore_marks_for_biased_locking = false;
 485 
 486     if (max_generation == Generation::Old && _old_gen->should_collect(full, size, is_tlab)) {
 487       if (!complete) {
 488         // The full_collections increment was missed above.
 489         increment_total_full_collections();
 490       }
 491 
 492       pre_full_gc_dump(NULL);    // do any pre full gc dumps
 493 
 494       if (!prepared_for_verification && run_verification &&
 495           VerifyGCLevel <= 1 && VerifyBeforeGC) {
 496         prepare_for_verify();
 497       }
 498 
 499       assert(_old_gen->performs_in_place_marking(), "All old generations do in place marking");
 500       collect_generation(_old_gen,
 501                          full,
 502                          size,
 503                          is_tlab,
 504                          run_verification && VerifyGCLevel <= 1,
 505                          do_clear_all_soft_refs,
 506                          true);
 507 
 508       must_restore_marks_for_biased_locking = true;
 509       collected_old = true;
 510     }
 511 
 512     // Update "complete" boolean wrt what actually transpired --
 513     // for instance, a promotion failure could have led to
 514     // a whole heap collection.
 515     complete = complete || collected_old;
 516 
 517     if (complete) { // We did a "major" collection
 518       // FIXME: See comment at pre_full_gc_dump call
 519       post_full_gc_dump(NULL);   // do any post full gc dumps
 520     }
 521 
 522     if (PrintGCDetails) {
 523       print_heap_change(gch_prev_used);
 524 
 525       // Print metaspace info for full GC with PrintGCDetails flag.
 526       if (complete) {
 527         MetaspaceAux::print_metaspace_change(metadata_prev_used);
 528       }
 529     }
 530 
 531     // Adjust generation sizes.
 532     if (collected_old) {
 533       _old_gen->compute_new_size();
 534     }
 535     _young_gen->compute_new_size();
 536 
 537     if (complete) {
 538       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
 539       ClassLoaderDataGraph::purge();
 540       MetaspaceAux::verify_metrics();
 541       // Resize the metaspace capacity after full collections
 542       MetaspaceGC::compute_new_size();
 543       update_full_collections_completed();
 544     }
 545 
 546     // Track memory usage and detect low memory after GC finishes
 547     MemoryService::track_memory_usage();
 548 
 549     gc_epilogue(complete);
 550 
 551     if (must_restore_marks_for_biased_locking) {
 552       BiasedLocking::restore_marks();


 640 
 641   if (!_process_strong_tasks->is_task_claimed(GCH_PS_CodeCache_oops_do)) {
 642     if (so & SO_ScavengeCodeCache) {
 643       assert(code_roots != NULL, "must supply closure for code cache");
 644 
 645       // We only visit parts of the CodeCache when scavenging.
 646       CodeCache::scavenge_root_nmethods_do(code_roots);
 647     }
 648     if (so & SO_AllCodeCache) {
 649       assert(code_roots != NULL, "must supply closure for code cache");
 650 
 651       // CMSCollector uses this to do intermediate-strength collections.
 652       // We scan the entire code cache, since CodeCache::do_unloading is not called.
 653       CodeCache::blobs_do(code_roots);
 654     }
 655     // Verify that the code cache contents are not subject to
 656     // movement by a scavenging collection.
 657     DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
 658     DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
 659   }
 660 }
 661 
 662 void GenCollectedHeap::gen_process_roots(StrongRootsScope* scope,
 663                                          Generation::Type type,
 664                                          bool younger_gens_as_roots,
 665                                          ScanningOption so,
 666                                          bool only_strong_roots,
 667                                          OopsInGenClosure* not_older_gens,
 668                                          OopsInGenClosure* older_gens,
 669                                          CLDClosure* cld_closure) {
 670   const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots;
 671 
 672   bool is_moving_collection = false;
 673   if (type == Generation::Young || is_adjust_phase) {
 674     // young collections are always moving
 675     is_moving_collection = true;
 676   }
 677 
 678   MarkingCodeBlobClosure mark_code_closure(not_older_gens, is_moving_collection);
 679   OopsInGenClosure* weak_roots = only_strong_roots ? NULL : not_older_gens;
 680   CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;
 681 
 682   process_roots(scope, so,
 683                 not_older_gens, weak_roots,
 684                 cld_closure, weak_cld_closure,
 685                 &mark_code_closure);
 686 
 687   if (younger_gens_as_roots) {
 688     if (!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
 689       if (type == Generation::Old) {
 690         not_older_gens->set_generation(_young_gen);
 691         _young_gen->oop_iterate(not_older_gens);
 692       }
 693       not_older_gens->reset_generation();
 694     }
 695   }
 696   // When collection is parallel, all threads get to cooperate to do
 697   // old generation scanning.
 698   if (type == Generation::Young) {
 699     older_gens->set_generation(_old_gen);
 700     rem_set()->younger_refs_iterate(_old_gen, older_gens, scope->n_threads());
 701     older_gens->reset_generation();
 702   }
 703 
 704   _process_strong_tasks->all_tasks_completed(scope->n_threads());
 705 }
 706 
 707 
 708 class AlwaysTrueClosure: public BoolObjectClosure {
 709 public:
 710   bool do_object_b(oop p) { return true; }
 711 };
 712 static AlwaysTrueClosure always_true;
 713 
 714 void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
 715   JNIHandles::weak_oops_do(&always_true, root_closure);
 716   _young_gen->ref_processor()->weak_oops_do(root_closure);
 717   _old_gen->ref_processor()->weak_oops_do(root_closure);
 718 }
 719 
 720 #define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
 721 void GenCollectedHeap::                                                 \
 722 oop_since_save_marks_iterate(Generation::Type gen,                      \
 723                              OopClosureType* cur,                       \
 724                              OopClosureType* older) {                   \
 725   if (gen == Generation::Young) {                                       \
 726     _young_gen->oop_since_save_marks_iterate##nv_suffix(cur);           \
 727     _old_gen->oop_since_save_marks_iterate##nv_suffix(older);           \
 728   } else {                                                              \
 729     _old_gen->oop_since_save_marks_iterate##nv_suffix(cur);             \
 730   }                                                                     \
 731 }
 732 
 733 ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)
 734 
 735 #undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN
 736 
 737 bool GenCollectedHeap::no_allocs_since_save_marks(bool include_young) {
 738   if (include_young && !_young_gen->no_allocs_since_save_marks()) {
 739     return false;
 740   }
 741   return _old_gen->no_allocs_since_save_marks();
 742 }
 743 
 744 bool GenCollectedHeap::supports_inline_contig_alloc() const {
 745   return _young_gen->supports_inline_contig_alloc();
 746 }
 747 
 748 HeapWord** GenCollectedHeap::top_addr() const {
 749   return _young_gen->top_addr();
 750 }
 751 
 752 HeapWord** GenCollectedHeap::end_addr() const {
 753   return _young_gen->end_addr();
 754 }
 755 
 756 // public collection interfaces
 757 
 758 void GenCollectedHeap::collect(GCCause::Cause cause) {
 759   if (should_do_concurrent_full_gc(cause)) {
 760 #if INCLUDE_ALL_GCS
 761     // mostly concurrent full collection
 762     collect_mostly_concurrent(cause);
 763 #else  // INCLUDE_ALL_GCS
 764     ShouldNotReachHere();
 765 #endif // INCLUDE_ALL_GCS
 766   } else if (cause == GCCause::_wb_young_gc) {
 767     // minor collection for WhiteBox API
 768     collect(cause, Generation::Young);
 769   } else {
 770 #ifdef ASSERT
 771   if (cause == GCCause::_scavenge_alot) {
 772     // minor collection only
 773     collect(cause, Generation::Young);
 774   } else {
 775     // Stop-the-world full collection
 776     collect(cause, Generation::Old);
 777   }
 778 #else
 779     // Stop-the-world full collection
 780     collect(cause, Generation::Old);
 781 #endif
 782   }
 783 }
 784 
 785 void GenCollectedHeap::collect(GCCause::Cause cause, Generation::Type max_generation) {
 786   // The caller doesn't have the Heap_lock
 787   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
 788   MutexLocker ml(Heap_lock);
 789   collect_locked(cause, max_generation);
 790 }
 791 
 792 void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
 793   // The caller has the Heap_lock
 794   assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
 795   collect_locked(cause, Generation::Old);
 796 }
 797 
 798 // this is the private collection interface
 799 // The Heap_lock is expected to be held on entry.
 800 
 801 void GenCollectedHeap::collect_locked(GCCause::Cause cause, Generation::Type max_generation) {
 802   // Read the GC count while holding the Heap_lock
 803   unsigned int gc_count_before      = total_collections();
 804   unsigned int full_gc_count_before = total_full_collections();
 805   {
 806     MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
 807     VM_GenCollectFull op(gc_count_before, full_gc_count_before,
 808                          cause, max_generation);
 809     VMThread::execute(&op);
 810   }
 811 }
 812 
 813 #if INCLUDE_ALL_GCS
 814 bool GenCollectedHeap::create_cms_collector() {
 815 
 816   assert(_old_gen->kind() == Generation::ConcurrentMarkSweep,
 817          "Unexpected generation kind");
 818   // Skip two header words in the block content verification
 819   NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
 820   CMSCollector* collector = new CMSCollector(
 821     (ConcurrentMarkSweepGeneration*)_old_gen,
 822     _rem_set->as_CardTableRS(),
 823     (ConcurrentMarkSweepPolicy*) collector_policy());
 824 
 825   if (collector == NULL || !collector->completed_initialization()) {
 826     if (collector) {
 827       delete collector;  // Be nice in embedded situation
 828     }


 831   }
 832   return true;  // success
 833 }
 834 
 835 void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
 836   assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");
 837 
 838   MutexLocker ml(Heap_lock);
 839   // Read the GC counts while holding the Heap_lock
 840   unsigned int full_gc_count_before = total_full_collections();
 841   unsigned int gc_count_before      = total_collections();
 842   {
 843     MutexUnlocker mu(Heap_lock);
 844     VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
 845     VMThread::execute(&op);
 846   }
 847 }
 848 #endif // INCLUDE_ALL_GCS
 849 
 850 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
 851    do_full_collection(clear_all_soft_refs, Generation::Old);
 852 }
 853 
 854 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
 855                                           Generation::Type last_generation) {
 856   Generation::Type local_last_generation;
 857   if (!incremental_collection_will_fail(false /* don't consult_young */) &&
 858       gc_cause() == GCCause::_gc_locker) {
 859     local_last_generation = Generation::Young;
 860   } else {
 861     local_last_generation = last_generation;
 862   }
 863 
 864   do_collection(true                  /* full */,
 865                 clear_all_soft_refs   /* clear_all_soft_refs */,
 866                 0                     /* size */,
 867                 false                 /* is_tlab */,
 868                 local_last_generation /* last_generation */);
 869   // Hack XXX FIX ME !!!
 870   // A scavenge may not have been attempted, or may have
 871   // been attempted and failed, because the old gen was too full
 872   if (local_last_generation == Generation::Young && gc_cause() == GCCause::_gc_locker &&
 873       incremental_collection_will_fail(false /* don't consult_young */)) {
 874     if (PrintGCDetails) {
 875       gclog_or_tty->print_cr("GC locker: Trying a full collection "
 876                              "because scavenge failed");
 877     }
 878     // This time allow the old gen to be collected as well
 879     do_collection(true                 /* full */,
 880                   clear_all_soft_refs  /* clear_all_soft_refs */,
 881                   0                    /* size */,
 882                   false                /* is_tlab */,
 883                   Generation::Old      /* last_generation */);
 884   }
 885 }
 886 
 887 bool GenCollectedHeap::is_in_young(oop p) {
 888   bool result = ((HeapWord*)p) < _old_gen->reserved().start();
 889   assert(result == _young_gen->is_in_reserved(p),
 890          err_msg("incorrect test - result=%d, p=" INTPTR_FORMAT, result, p2i((void*)p)));
 891   return result;
 892 }
 893 
 894 // Returns "TRUE" iff "p" points into the committed areas of the heap.
 895 bool GenCollectedHeap::is_in(const void* p) const {
 896   return _young_gen->is_in(p) || _old_gen->is_in(p);
 897 }
 898 
 899 #ifdef ASSERT
 900 // Don't implement this by using is_in_young().  This method is used
 901 // in some cases to check that is_in_young() is correct.
 902 bool GenCollectedHeap::is_in_partial_collection(const void* p) {
 903   assert(is_in_reserved(p) || p == NULL,


1086 
1087 void GenCollectedHeap::save_marks() {
1088   _young_gen->save_marks();
1089   _old_gen->save_marks();
1090 }
1091 
1092 GenCollectedHeap* GenCollectedHeap::heap() {
1093   CollectedHeap* heap = Universe::heap();
1094   assert(heap != NULL, "Uninitialized access to GenCollectedHeap::heap()");
1095   assert(heap->kind() == CollectedHeap::GenCollectedHeap, "Not a GenCollectedHeap");
1096   return (GenCollectedHeap*)heap;
1097 }
1098 
1099 void GenCollectedHeap::prepare_for_compaction() {
1100   // Start by compacting into same gen.
1101   CompactPoint cp(_old_gen);
1102   _old_gen->prepare_for_compaction(&cp);
1103   _young_gen->prepare_for_compaction(&cp);
1104 }
1105 
1106 GCStats* GenCollectedHeap::gc_stats(Generation* gen) const {
1107   return gen->gc_stats();
1108 }
1109 
1110 void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
1111   if (!silent) {
1112     gclog_or_tty->print("%s", _old_gen->name());
1113     gclog_or_tty->print(" ");
1114   }
1115   _old_gen->verify();
1116 
1117   if (!silent) {
1118     gclog_or_tty->print("%s", _young_gen->name());
1119     gclog_or_tty->print(" ");
1120   }
1121   _young_gen->verify();
1122 
1123   if (!silent) {
1124     gclog_or_tty->print("remset ");
1125   }
1126   rem_set()->verify();
1127 }


1257   }
1258 }
1259 #endif  // not PRODUCT
1260 
1261 class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
1262  public:
1263   void do_generation(Generation* gen) {
1264     gen->ensure_parsability();
1265   }
1266 };
1267 
1268 void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
1269   CollectedHeap::ensure_parsability(retire_tlabs);
1270   GenEnsureParsabilityClosure ep_cl;
1271   generation_iterate(&ep_cl, false);
1272 }
1273 
1274 oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
1275                                               oop obj,
1276                                               size_t obj_size) {
1277   guarantee(old_gen == _old_gen, "We only get here with an old generation");
1278   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1279   HeapWord* result = NULL;
1280 
1281   result = old_gen->expand_and_allocate(obj_size, false);
1282 
1283   if (result != NULL) {
1284     Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
1285   }
1286   return oop(result);
1287 }
1288 
1289 class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
1290   jlong _time;   // in ms
1291   jlong _now;    // in ms
1292 
1293  public:
1294   GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }
1295 
1296   jlong time() { return _time; }
1297 

