< prev index next >

src/share/vm/gc/shared/genCollectedHeap.cpp

Print this page




 446   print_heap_before_gc();
 447 
 448   {
 449     FlagSetting fl(_is_gc_active, true);
 450 
 451     bool complete = full && (max_generation == OldGen);
 452     const char* gc_cause_prefix = complete ? "Full GC" : "GC";
 453     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
 454     // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
 455     // so we can assume here that the next GC id is what we want.
 456     GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek());
 457 
 458     gc_prologue(complete);
 459     increment_total_collections(complete);
 460 
 461     size_t gch_prev_used = used();
 462     bool run_verification = total_collections() >= VerifyGCStartAt;
 463 
 464     bool prepared_for_verification = false;
 465     bool collected_old = false;
 466     bool old_collects_young = complete &&
 467                               _old_gen->full_collects_young_generation();
 468     if (!old_collects_young &&
 469         _young_gen->should_collect(full, size, is_tlab)) {
 470       if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
 471         prepare_for_verify();
 472         prepared_for_verification = true;
 473       }
 474 
 475       assert(!_young_gen->performs_in_place_marking(), "No young generation do in place marking");
 476       collect_generation(_young_gen,
 477                          full,
 478                          size,
 479                          is_tlab,
 480                          run_verification && VerifyGCLevel <= 0,
 481                          do_clear_all_soft_refs,
 482                          false);
 483 
 484       if (size > 0 && (!is_tlab || _young_gen->supports_tlab_allocation()) &&
 485           size * HeapWordSize <= _young_gen->unsafe_max_alloc_nogc()) {
 486         // Allocation request was met by young GC.
 487         size = 0;
 488       }
 489     }


 649     if (so & SO_ScavengeCodeCache) {
 650       assert(code_roots != NULL, "must supply closure for code cache");
 651 
 652       // We only visit parts of the CodeCache when scavenging.
 653       CodeCache::scavenge_root_nmethods_do(code_roots);
 654     }
 655     if (so & SO_AllCodeCache) {
 656       assert(code_roots != NULL, "must supply closure for code cache");
 657 
 658       // CMSCollector uses this to do intermediate-strength collections.
 659       // We scan the entire code cache, since CodeCache::do_unloading is not called.
 660       CodeCache::blobs_do(code_roots);
 661     }
 662     // Verify that the code cache contents are not subject to
 663     // movement by a scavenging collection.
 664     DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
 665     DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
 666   }
 667 }
 668 
 // Process GC roots on behalf of a collection of generation `type`.
 // Strong roots are always scanned via process_roots(); weak roots and weak
 // CLDs are passed only when !only_strong_roots. When young_gen_as_roots,
 // one worker additionally iterates the whole young generation as a root set
 // (only useful for an OldGen collection). For a YoungGen collection, all
 // workers cooperate to scan the old generation's remembered set for
 // references into the young generation.
 669 void GenCollectedHeap::gen_process_roots(StrongRootsScope* scope,
 670                                          GenerationType type,





















 671                                          bool young_gen_as_roots,
 672                                          ScanningOption so,
 673                                          bool only_strong_roots,
 674                                          OopsInGenClosure* not_older_gens,
 675                                          OopsInGenClosure* older_gens,
 676                                          CLDClosure* cld_closure) {
       // Neither strong-only nor young-as-roots: presumably the full-GC
       // pointer-adjustment phase — TODO(review) confirm against callers.
 677   const bool is_adjust_phase = !only_strong_roots && !young_gen_as_roots;
 678 
 679   bool is_moving_collection = false;
 680   if (type == YoungGen || is_adjust_phase) {
 681     // young collections are always moving
 682     is_moving_collection = true;
 683   }
 684 
       // A moving collection must mark nmethods it visits so their oops are
       // kept up to date; weak roots/CLDs are dropped for strong-only scans.
 685   MarkingCodeBlobClosure mark_code_closure(not_older_gens, is_moving_collection);
 686   OopsInGenClosure* weak_roots = only_strong_roots ? NULL : not_older_gens;
 687   CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;
 688 
 689   process_roots(scope, so,
 690                 not_older_gens, weak_roots,
 691                 cld_closure, weak_cld_closure,
 692                 &mark_code_closure);
 693 
 694   if (young_gen_as_roots) {
       // Task claiming ensures exactly one worker iterates the young gen.
 695     if (!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
 696       if (type == OldGen) {
 697         not_older_gens->set_generation(_young_gen);
 698         _young_gen->oop_iterate(not_older_gens);
 699       }
 700       not_older_gens->reset_generation();
 701     }
 702   }
 703   // When collection is parallel, all threads get to cooperate to do
 704   // old generation scanning.
 705   if (type == YoungGen) {
 706     older_gens->set_generation(_old_gen);
 707     rem_set()->younger_refs_iterate(_old_gen, older_gens, scope->n_threads());
 708     older_gens->reset_generation();
 709   }
 710 
       // Parallel termination: wait until every worker has finished its tasks.
 711   _process_strong_tasks->all_tasks_completed(scope->n_threads());
 712 }
 713 
 714 
 // Liveness predicate that reports every object as live. Passed to
 // JNIHandles::weak_oops_do below so that all weak JNI handles are
 // treated as live and handed to the root closure.
 715 class AlwaysTrueClosure: public BoolObjectClosure {
 716 public:
 717   bool do_object_b(oop p) { return true; }
 718 };
 719 static AlwaysTrueClosure always_true;
 720 
 // Apply root_closure to the weak roots of the heap: all weak JNI handles
 // (always_true => none are filtered out) and the oops held by each
 // generation's reference processor.
 721 void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
 722   JNIHandles::weak_oops_do(&always_true, root_closure);
 723   _young_gen->ref_processor()->weak_oops_do(root_closure);
 724   _old_gen->ref_processor()->weak_oops_do(root_closure);
 725 }
 726 
 727 #define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
 728 void GenCollectedHeap::                                                 \
 729 oop_since_save_marks_iterate(GenerationType gen,                        \
 730                              OopClosureType* cur,                       \
 731                              OopClosureType* older) {                   \
 732   if (gen == YoungGen) {                              \
 733     _young_gen->oop_since_save_marks_iterate##nv_suffix(cur);           \
 734     _old_gen->oop_since_save_marks_iterate##nv_suffix(older);           \


1091 
// Record the current allocation marks in both generations (used later by
// the oop_since_save_marks_iterate machinery).
1092 void GenCollectedHeap::save_marks() {
1093   _young_gen->save_marks();
1094   _old_gen->save_marks();
1095 }
1096 
// Checked accessor for the singleton heap: asserts that Universe::heap()
// is initialized and really is a GenCollectedHeap before downcasting.
1097 GenCollectedHeap* GenCollectedHeap::heap() {
1098   CollectedHeap* heap = Universe::heap();
1099   assert(heap != NULL, "Uninitialized access to GenCollectedHeap::heap()");
1100   assert(heap->kind() == CollectedHeap::GenCollectedHeap, "Not a GenCollectedHeap");
1101   return (GenCollectedHeap*)heap;
1102 }
1103 
// Plan a full compaction: the compact point starts in the old generation,
// so old-gen objects compact in place first and young-gen survivors are
// then compacted after them (into the old gen if room remains).
1104 void GenCollectedHeap::prepare_for_compaction() {
1105   // Start by compacting into same gen.
1106   CompactPoint cp(_old_gen);
1107   _old_gen->prepare_for_compaction(&cp);
1108   _young_gen->prepare_for_compaction(&cp);
1109 }
1110 
// Forward to the generation's own statistics object.
1111 GCStats* GenCollectedHeap::gc_stats(Generation* gen) const {
1112   return gen->gc_stats();
1113 }
1114 
// Verify heap consistency: old generation, then young generation, then the
// remembered set. Unless silent, each component's name is printed before
// it is verified so a failure can be attributed in the log.
1115 void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
1116   if (!silent) {
1117     gclog_or_tty->print("%s", _old_gen->name());
1118     gclog_or_tty->print(" ");
1119   }
1120   _old_gen->verify();
1121 
1122   if (!silent) {
1123     gclog_or_tty->print("%s", _young_gen->name());
1124     gclog_or_tty->print(" ");
1125   }
1126   _young_gen->verify();
1127 
1128   if (!silent) {
1129     gclog_or_tty->print("remset ");
1130   }
1131   rem_set()->verify();
1132 }
1133 
1134 void GenCollectedHeap::print_on(outputStream* st) const {




 446   print_heap_before_gc();
 447 
 448   {
 449     FlagSetting fl(_is_gc_active, true);
 450 
 451     bool complete = full && (max_generation == OldGen);
 452     const char* gc_cause_prefix = complete ? "Full GC" : "GC";
 453     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
 454     // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
 455     // so we can assume here that the next GC id is what we want.
 456     GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek());
 457 
 458     gc_prologue(complete);
 459     increment_total_collections(complete);
 460 
 461     size_t gch_prev_used = used();
 462     bool run_verification = total_collections() >= VerifyGCStartAt;
 463 
 464     bool prepared_for_verification = false;
 465     bool collected_old = false;
 466     bool old_collects_young = complete && !ScavengeBeforeFullGC;
 467 
 468     if (!old_collects_young && _young_gen->should_collect(full, size, is_tlab)) {

 469       if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
 470         prepare_for_verify();
 471         prepared_for_verification = true;
 472       }
 473 
 474       assert(!_young_gen->performs_in_place_marking(), "No young generation do in place marking");
 475       collect_generation(_young_gen,
 476                          full,
 477                          size,
 478                          is_tlab,
 479                          run_verification && VerifyGCLevel <= 0,
 480                          do_clear_all_soft_refs,
 481                          false);
 482 
 483       if (size > 0 && (!is_tlab || _young_gen->supports_tlab_allocation()) &&
 484           size * HeapWordSize <= _young_gen->unsafe_max_alloc_nogc()) {
 485         // Allocation request was met by young GC.
 486         size = 0;
 487       }
 488     }


 648     if (so & SO_ScavengeCodeCache) {
 649       assert(code_roots != NULL, "must supply closure for code cache");
 650 
 651       // We only visit parts of the CodeCache when scavenging.
 652       CodeCache::scavenge_root_nmethods_do(code_roots);
 653     }
 654     if (so & SO_AllCodeCache) {
 655       assert(code_roots != NULL, "must supply closure for code cache");
 656 
 657       // CMSCollector uses this to do intermediate-strength collections.
 658       // We scan the entire code cache, since CodeCache::do_unloading is not called.
 659       CodeCache::blobs_do(code_roots);
 660     }
 661     // Verify that the code cache contents are not subject to
 662     // movement by a scavenging collection.
 663     DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
 664     DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
 665   }
 666 }
 667 
 // Process GC roots for a young collection. Strong and weak roots use the
 // same closures (nothing is skipped), only the scavengable part of the
 // code cache is visited, and nmethods are marked because a young
 // collection always moves objects. All workers then cooperate to scan the
 // old generation's remembered set for references into the young gen.
 668 void GenCollectedHeap::young_process_roots(StrongRootsScope* scope,
 669                                            OopsInGenClosure* young_gen_closure,
 670                                            OopsInGenClosure* old_gen_closure,
 671                                            CLDClosure* cld_closure) {
 672   MarkingCodeBlobClosure mark_code_closure(young_gen_closure, true /* young collections are always moving */);
 673 
 674   process_roots(scope, SO_ScavengeCodeCache, young_gen_closure, young_gen_closure,
 675                 cld_closure, cld_closure, &mark_code_closure);
 676 
       // One worker claims the younger-gens task and resets the closure's
       // generation state (no young-gen iteration is needed here: the young
       // gen is the collected gen, not a root, in a young collection).
 677   if (!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
 678     young_gen_closure->reset_generation();
 679   }
 680 
 681   // When collection is parallel, all threads get to cooperate to do
 682   // old generation scanning.
 683   old_gen_closure->set_generation(_old_gen);
 684   rem_set()->younger_refs_iterate(_old_gen, old_gen_closure, scope->n_threads());
 685   old_gen_closure->reset_generation();
 686 
       // Parallel termination: wait until every worker has finished.
 687   _process_strong_tasks->all_tasks_completed(scope->n_threads());
 688 }
 689 
 // Process GC roots for an old (full) collection. Weak roots and weak CLDs
 // are included only when !only_strong_roots; when young_gen_as_roots, one
 // worker iterates the entire young generation as part of the root set.
 690 void GenCollectedHeap::old_process_roots(StrongRootsScope* scope,
 691                                          bool young_gen_as_roots,
 692                                          ScanningOption so,
 693                                          bool only_strong_roots,
 694                                          OopsInGenClosure* young_gen_closure,

 695                                          CLDClosure* cld_closure) {
       // Neither strong-only nor young-as-roots: presumably the full-GC
       // pointer-adjustment phase — TODO(review) confirm against callers.
 696   const bool is_adjust_phase = !only_strong_roots && !young_gen_as_roots;
 697 
 698   bool is_moving_collection = false;
 699   if (is_adjust_phase) {

 700     is_moving_collection = true;
 701   }
 702 
       // Mark visited nmethods when objects may move so their embedded oops
       // stay valid; drop weak roots/CLDs for strong-only scans.
 703   MarkingCodeBlobClosure mark_code_closure(young_gen_closure, is_moving_collection);
 704   OopsInGenClosure* weak_roots = only_strong_roots ? NULL : young_gen_closure;
 705   CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;
 706 
 707   process_roots(scope, so, young_gen_closure, weak_roots, cld_closure, weak_cld_closure, &mark_code_closure);
 708 
       // Task claiming ensures exactly one worker iterates the young gen.
 709   if (young_gen_as_roots &&
 710       !_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
 711     young_gen_closure->set_generation(_young_gen);
 712     _young_gen->oop_iterate(young_gen_closure);
 713     young_gen_closure->reset_generation();













 714   }
 715 
       // Parallel termination: wait until every worker has finished.
 716   _process_strong_tasks->all_tasks_completed(scope->n_threads());
 717 }
 718 

 // Liveness predicate that answers "live" for every object; used below to
 // hand every weak JNI handle to the root closure unfiltered.
 719 class AlwaysTrueClosure: public BoolObjectClosure {
 720 public:
 721   bool do_object_b(oop p) { return true; }
 722 };
 723 static AlwaysTrueClosure always_true;
 724 
 // Apply root_closure to the heap's weak roots: every weak JNI handle
 // (always_true keeps them all) plus the oops held by the young and old
 // generations' reference processors.
 725 void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
 726   JNIHandles::weak_oops_do(&always_true, root_closure);
 727   _young_gen->ref_processor()->weak_oops_do(root_closure);
 728   _old_gen->ref_processor()->weak_oops_do(root_closure);
 729 }
 730 
 731 #define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
 732 void GenCollectedHeap::                                                 \
 733 oop_since_save_marks_iterate(GenerationType gen,                        \
 734                              OopClosureType* cur,                       \
 735                              OopClosureType* older) {                   \
 736   if (gen == YoungGen) {                              \
 737     _young_gen->oop_since_save_marks_iterate##nv_suffix(cur);           \
 738     _old_gen->oop_since_save_marks_iterate##nv_suffix(older);           \


1095 
// Snapshot the current allocation marks of both generations for later
// since-save-marks iteration.
1096 void GenCollectedHeap::save_marks() {
1097   _young_gen->save_marks();
1098   _old_gen->save_marks();
1099 }
1100 
// Singleton accessor: asserts Universe::heap() is set up and of the
// expected kind, then downcasts.
1101 GenCollectedHeap* GenCollectedHeap::heap() {
1102   CollectedHeap* heap = Universe::heap();
1103   assert(heap != NULL, "Uninitialized access to GenCollectedHeap::heap()");
1104   assert(heap->kind() == CollectedHeap::GenCollectedHeap, "Not a GenCollectedHeap");
1105   return (GenCollectedHeap*)heap;
1106 }
1107 
// Plan a full compaction: a single compact point starting in the old gen,
// consumed first by the old generation, then by the young generation so
// survivors pack in after the old gen's objects.
1108 void GenCollectedHeap::prepare_for_compaction() {
1109   // Start by compacting into same gen.
1110   CompactPoint cp(_old_gen);
1111   _old_gen->prepare_for_compaction(&cp);
1112   _young_gen->prepare_for_compaction(&cp);
1113 }
1114 




// Verify the old generation, young generation, and remembered set in turn;
// unless silent, each component's name is logged before verification so a
// failing assert can be attributed.
1115 void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
1116   if (!silent) {
1117     gclog_or_tty->print("%s", _old_gen->name());
1118     gclog_or_tty->print(" ");
1119   }
1120   _old_gen->verify();
1121 
1122   if (!silent) {
1123     gclog_or_tty->print("%s", _young_gen->name());
1124     gclog_or_tty->print(" ");
1125   }
1126   _young_gen->verify();
1127 
1128   if (!silent) {
1129     gclog_or_tty->print("remset ");
1130   }
1131   rem_set()->verify();
1132 }
1133 
1134 void GenCollectedHeap::print_on(outputStream* st) const {


< prev index next >