src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp

3054 
3055   // Whenever a CLD is found, it will be claimed before proceeding to mark
3056   // the klasses. The claimed marks need to be cleared before marking starts.
3057   ClassLoaderDataGraph::clear_claimed_marks();
3058 
3059   if (CMSPrintEdenSurvivorChunks) {
3060     print_eden_and_survivor_chunk_arrays();
3061   }
3062 
3063   {
3064     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3065     if (CMSParallelInitialMarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
3066       // The parallel version.
3067       FlexibleWorkGang* workers = gch->workers();
3068       assert(workers != NULL, "Need parallel worker threads.");
3069       int n_workers = workers->active_workers();
3070       CMSParInitialMarkTask tsk(this, n_workers);
3071       gch->set_par_threads(n_workers);
3072       initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
3073       if (n_workers > 1) {
3074         GenCollectedHeap::StrongRootsScope srs(gch);
3075         workers->run_task(&tsk);
3076       } else {
3077         GenCollectedHeap::StrongRootsScope srs(gch);
3078         tsk.work(0);
3079       }
3080       gch->set_par_threads(0);
3081     } else {
3082       // The serial version.
3083       CLDToOopClosure cld_closure(&notOlder, true);
3084       gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3085       gch->gen_process_roots(_cmsGen->level(),
3086                              true,   // younger gens are roots
3087                              true,   // activate StrongRootsScope
3088                              GenCollectedHeap::ScanningOption(roots_scanning_options()),
3089                              should_unload_classes(),
3090                              &notOlder,
3091                              NULL,
3092                              &cld_closure);
3093     }
3094   }
3095 
3096   // Clear mod-union table; it will be dirtied in the prologue of
3097   // CMS generation per each younger generation collection.
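
The hunk above runs the initial-mark root scan either through the worker gang (CMSParInitialMarkTask dispatched via run_task) or, when only one worker is active, by calling tsk.work(0) directly, so both branches execute the same task body. The stand-alone sketch below shows only that dispatch shape; RootScanTask and WorkerPool are invented names for illustration, not the HotSpot types used above.

#include <cstdio>
#include <thread>
#include <vector>

// Hypothetical stand-in for a gang task: each worker scans its own slice of roots.
struct RootScanTask {
  int n_workers;
  explicit RootScanTask(int n) : n_workers(n) {}
  void work(int worker_id) {
    std::printf("worker %d scanning its share of roots\n", worker_id);
  }
};

// Hypothetical stand-in for the worker gang: run the same task body on N threads.
struct WorkerPool {
  int active_workers;
  explicit WorkerPool(int n) : active_workers(n) {}
  void run_task(RootScanTask* task) {
    std::vector<std::thread> threads;
    for (int i = 0; i < active_workers; i++) {
      threads.emplace_back([task, i] { task->work(i); });
    }
    for (std::thread& t : threads) t.join();
  }
};

int main() {
  WorkerPool workers(4);
  int n_workers = workers.active_workers;
  RootScanTask tsk(n_workers);
  if (n_workers > 1) {
    workers.run_task(&tsk);   // parallel: gang threads each call work(i)
  } else {
    tsk.work(0);              // single worker: run the same body inline
  }
  return 0;
}

The n_workers > 1 test only decides which thread runs work(); the task body itself is identical on both paths.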


5152   // scanning the [two] survivor spaces. Further fine-grain
5153   // parallelization of the scanning of the survivor spaces
5154   // themselves, and of precleaning of the younger gen itself
5155   // is deferred to the future.
5156   initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
5157 
5158   // The dirty card rescan work is broken up into a "sequence"
5159   // of parallel tasks (per constituent space) that are dynamically
5160   // claimed by the parallel threads.
5161   cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
5162 
5163   // It turns out that even when we're using 1 thread, doing the work in a
5164   // separate thread causes wide variance in run times.  We can't help this
5165   // in the multi-threaded case, but we special-case n=1 here to get
5166   // repeatable measurements of the 1-thread overhead of the parallel code.
5167   if (n_workers > 1) {
5168     // Make refs discovery MT-safe, if it isn't already: it may not
5169     // necessarily be so, since it's possible that we are doing
5170     // ST marking.
5171     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
5172     GenCollectedHeap::StrongRootsScope srs(gch);
5173     workers->run_task(&tsk);
5174   } else {
5175     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5176     GenCollectedHeap::StrongRootsScope srs(gch);
5177     tsk.work(0);
5178   }
5179 
5180   gch->set_par_threads(0);  // 0 ==> non-parallel.
5181   // restore, single-threaded for now, any preserved marks
5182   // as a result of work_q overflow
5183   restore_preserved_marks_if_any();
5184 }
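
The comment at 5158-5161 describes breaking the dirty-card rescan into a sequence of subtasks that the worker threads claim dynamically. A minimal way to express "dynamically claimed" is an atomic cursor over the subtask indices, as in the sketch below; SequentialSubTasks and try_claim are hypothetical names, not the HotSpot interface behind initialize_sequential_subtasks_for_rescan.

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

// Hypothetical stand-in for a sequence of subtasks claimed by parallel workers.
struct SequentialSubTasks {
  int n_tasks;
  std::atomic<int> next{0};
  explicit SequentialSubTasks(int n) : n_tasks(n) {}
  // Returns true and sets 'task' if this worker claimed an unclaimed subtask.
  bool try_claim(int* task) {
    int t = next.fetch_add(1, std::memory_order_relaxed);
    if (t < n_tasks) { *task = t; return true; }
    return false;
  }
};

void rescan_worker(SequentialSubTasks* subtasks, int worker_id) {
  int task;
  while (subtasks->try_claim(&task)) {
    std::printf("worker %d rescans dirty-card subtask %d\n", worker_id, task);
  }
}

int main() {
  const int n_workers = 4;
  SequentialSubTasks subtasks(16);   // e.g. 16 card-covered strides to rescan
  std::vector<std::thread> workers;
  for (int i = 0; i < n_workers; i++) {
    workers.emplace_back(rescan_worker, &subtasks, i);
  }
  for (std::thread& t : workers) t.join();
  return 0;
}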
5185 
5186 // Non-parallel version of remark
5187 void CMSCollector::do_remark_non_parallel() {
5188   ResourceMark rm;
5189   HandleMark   hm;
5190   GenCollectedHeap* gch = GenCollectedHeap::heap();
5191   ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5192 
5193   MarkRefsIntoAndScanClosure
5194     mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
5195              &_markStack, this,
5196              false /* should_yield */, false /* not precleaning */);
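
Both remark paths bracket their work with a ReferenceProcessorMTDiscoveryMutator, passing true on the parallel path and false here, which reads as a scoped set-and-restore of the reference processor's MT-discovery setting. The sketch below shows only that save/set/restore shape under invented names (RefProcessor, ScopedMTDiscovery); it is not the HotSpot implementation.

#include <cstdio>

// Hypothetical stand-in for a reference processor with an MT-discovery flag.
struct RefProcessor {
  bool mt_discovery = false;
};

// RAII mutator sketch: set the flag for the enclosing scope, restore it on exit.
class ScopedMTDiscovery {
  RefProcessor* _rp;
  bool _saved;
 public:
  ScopedMTDiscovery(RefProcessor* rp, bool value)
      : _rp(rp), _saved(rp->mt_discovery) {
    _rp->mt_discovery = value;
  }
  ~ScopedMTDiscovery() { _rp->mt_discovery = _saved; }
};

int main() {
  RefProcessor rp;
  {
    ScopedMTDiscovery mt(&rp, true);   // parallel phase: discovery must be MT-safe
    std::printf("during parallel remark: mt_discovery=%d\n", rp.mt_discovery);
  }
  std::printf("after the scope: mt_discovery=%d\n", rp.mt_discovery);
  return 0;
}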


5224       _modUnionTable.dirty_range_iterate_clear(cms_span,
5225                                                &markFromDirtyCardsClosure);
5226       verify_work_stacks_empty();
5227       if (PrintCMSStatistics != 0) {
5228         gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
5229           markFromDirtyCardsClosure.num_dirty_cards());
5230       }
5231     }
5232   }
5233   if (VerifyDuringGC &&
5234       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5235     HandleMark hm;  // Discard invalid handles created during verification
5236     Universe::verify();
5237   }
5238   {
5239     GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5240 
5241     verify_work_stacks_empty();
5242 
5243     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5244     GenCollectedHeap::StrongRootsScope srs(gch);
5245 
5246     gch->gen_process_roots(_cmsGen->level(),
5247                            true,  // younger gens as roots
5248                            false, // use the local StrongRootsScope
5249                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
5250                            should_unload_classes(),
5251                            &mrias_cl,
5252                            NULL,
5253                            NULL); // The dirty klasses will be handled below
5254 
5255     assert(should_unload_classes()
5256            || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
5257            "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5258   }
5259 
5260   {
5261     GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5262 
5263     verify_work_stacks_empty();
5264 
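
The call to _modUnionTable.dirty_range_iterate_clear at 5224-5225 hands each range of dirty mod-union bits over the CMS span to markFromDirtyCardsClosure and clears the bits as it goes, so only cards dirtied since they were last processed get re-scanned. The sketch below illustrates that iterate-then-clear pattern over a plain bit vector; ModUnionSketch and its closure signature are invented for illustration, not the HotSpot bitmap type.

#include <cstdio>
#include <functional>
#include <vector>

// Hypothetical sketch: one bit per card; consecutive dirty bits form one range
// that is handed to the closure and cleared in the same pass.
struct ModUnionSketch {
  std::vector<bool> dirty;
  explicit ModUnionSketch(size_t n_cards) : dirty(n_cards, false) {}

  void dirty_range_iterate_clear(const std::function<void(size_t, size_t)>& closure) {
    size_t i = 0;
    while (i < dirty.size()) {
      if (!dirty[i]) { i++; continue; }
      size_t start = i;
      while (i < dirty.size() && dirty[i]) dirty[i++] = false;  // clear as we go
      closure(start, i);  // [start, i) was one maximal dirty range
    }
  }
};

int main() {
  ModUnionSketch table(32);
  table.dirty[3] = table.dirty[4] = table.dirty[10] = true;
  table.dirty_range_iterate_clear([](size_t start, size_t end) {
    std::printf("re-scan cards [%zu, %zu)\n", start, end);
  });
  return 0;
}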




3054 
3055   // Whenever a CLD is found, it will be claimed before proceeding to mark
3056   // the klasses. The claimed marks need to be cleared before marking starts.
3057   ClassLoaderDataGraph::clear_claimed_marks();
3058 
3059   if (CMSPrintEdenSurvivorChunks) {
3060     print_eden_and_survivor_chunk_arrays();
3061   }
3062 
3063   {
3064     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3065     if (CMSParallelInitialMarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
3066       // The parallel version.
3067       FlexibleWorkGang* workers = gch->workers();
3068       assert(workers != NULL, "Need parallel worker threads.");
3069       int n_workers = workers->active_workers();
3070       CMSParInitialMarkTask tsk(this, n_workers);
3071       gch->set_par_threads(n_workers);
3072       initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
3073       if (n_workers > 1) {
3074         GenCollectedHeap::StrongRootsScope srs;
3075         workers->run_task(&tsk);
3076       } else {
3077         GenCollectedHeap::StrongRootsScope srs;
3078         tsk.work(0);
3079       }
3080       gch->set_par_threads(0);
3081     } else {
3082       // The serial version.
3083       CLDToOopClosure cld_closure(&notOlder, true);
3084       gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3085       gch->gen_process_roots(_cmsGen->level(),
3086                              true,   // younger gens are roots
3087                              true,   // activate StrongRootsScope
3088                              GenCollectedHeap::ScanningOption(roots_scanning_options()),
3089                              should_unload_classes(),
3090                              &notOlder,
3091                              NULL,
3092                              &cld_closure);
3093     }
3094   }
3095 
3096   // Clear mod-union table; it will be dirtied in the prologue of
3097   // CMS generation per each younger generation collection.
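
The comment at 3055-3056 says each ClassLoaderData is claimed before its klasses are marked, and that the claimed marks must be cleared before marking starts; the claim is what keeps several workers that reach the same CLD through different roots from marking it more than once. A minimal sketch of that claim-then-clear protocol, using one atomic flag per loader, is below; Loader, try_claim and clear_claimed_marks are illustrative names, not the ClassLoaderDataGraph API.

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

// Hypothetical sketch of "claim before marking" for class-loader data.
struct Loader {
  std::atomic<bool> claimed{false};
  // First caller wins; later workers see 'true' and skip the loader.
  bool try_claim() { return !claimed.exchange(true, std::memory_order_acq_rel); }
};

void clear_claimed_marks(std::vector<Loader>& loaders) {
  for (Loader& l : loaders) l.claimed.store(false, std::memory_order_relaxed);
}

void mark_worker(std::vector<Loader>& loaders, int id) {
  for (size_t i = 0; i < loaders.size(); i++) {
    if (loaders[i].try_claim()) {
      std::printf("worker %d marks loader %zu\n", id, i);  // marked exactly once
    }
  }
}

int main() {
  std::vector<Loader> loaders(4);
  clear_claimed_marks(loaders);   // reset claims before the marking cycle
  std::thread t1(mark_worker, std::ref(loaders), 0);
  std::thread t2(mark_worker, std::ref(loaders), 1);
  t1.join(); t2.join();
  return 0;
}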


5152   // scanning the [two] survivor spaces. Further fine-grain
5153   // parallelization of the scanning of the survivor spaces
5154   // themselves, and of precleaning of the younger gen itself
5155   // is deferred to the future.
5156   initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
5157 
5158   // The dirty card rescan work is broken up into a "sequence"
5159   // of parallel tasks (per constituent space) that are dynamically
5160   // claimed by the parallel threads.
5161   cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
5162 
5163   // It turns out that even when we're using 1 thread, doing the work in a
5164   // separate thread causes wide variance in run times.  We can't help this
5165   // in the multi-threaded case, but we special-case n=1 here to get
5166   // repeatable measurements of the 1-thread overhead of the parallel code.
5167   if (n_workers > 1) {
5168     // Make refs discovery MT-safe, if it isn't already: it may not
5169     // necessarily be so, since it's possible that we are doing
5170     // ST marking.
5171     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
5172     GenCollectedHeap::StrongRootsScope srs;
5173     workers->run_task(&tsk);
5174   } else {
5175     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5176     GenCollectedHeap::StrongRootsScope srs;
5177     tsk.work(0);
5178   }
5179 
5180   gch->set_par_threads(0);  // 0 ==> non-parallel.
5181   // restore, single-threaded for now, any preserved marks
5182   // as a result of work_q overflow
5183   restore_preserved_marks_if_any();
5184 }
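
restore_preserved_marks_if_any(), called just above, undoes mark-word preservation performed when a marking work queue overflows and an object's header has to be saved aside; as the comment notes, the restore runs single-threaded after the parallel phase. The sketch below is only a schematic of that save-then-restore idea; Obj, MarkingContext and the overflow handling are invented and simplified, not CMS's actual overflow machinery.

#include <cstdint>
#include <cstdio>
#include <utility>
#include <vector>

struct Obj { uint64_t mark; };

// Schematic only: a bounded work queue that, when full, saves the object's
// original mark word to a side table so the overflow path may reuse the header.
struct MarkingContext {
  std::vector<Obj*> work_q;
  size_t capacity;
  std::vector<std::pair<Obj*, uint64_t>> preserved;

  explicit MarkingContext(size_t cap) : capacity(cap) {}

  void push(Obj* o) {
    if (work_q.size() < capacity) {
      work_q.push_back(o);
    } else {
      preserved.emplace_back(o, o->mark);  // remember the original mark word
      o->mark = 0;                         // overflow path overwrites the header
    }
  }

  // Run on one thread after the parallel phase, as in the code above.
  void restore_preserved_marks_if_any() {
    for (std::pair<Obj*, uint64_t>& entry : preserved) {
      entry.first->mark = entry.second;
    }
    preserved.clear();
  }
};

int main() {
  MarkingContext ctx(2);
  Obj a{1}, b{2}, c{3};
  ctx.push(&a); ctx.push(&b); ctx.push(&c);   // third push overflows
  std::printf("c.mark while preserved: %llu\n", (unsigned long long)c.mark);
  ctx.restore_preserved_marks_if_any();
  std::printf("c.mark after restore:   %llu\n", (unsigned long long)c.mark);
  return 0;
}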
5185 
5186 // Non-parallel version of remark
5187 void CMSCollector::do_remark_non_parallel() {
5188   ResourceMark rm;
5189   HandleMark   hm;
5190   GenCollectedHeap* gch = GenCollectedHeap::heap();
5191   ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5192 
5193   MarkRefsIntoAndScanClosure
5194     mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
5195              &_markStack, this,
5196              false /* should_yield */, false /* not precleaning */);


5224       _modUnionTable.dirty_range_iterate_clear(cms_span,
5225                                                &markFromDirtyCardsClosure);
5226       verify_work_stacks_empty();
5227       if (PrintCMSStatistics != 0) {
5228         gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
5229           markFromDirtyCardsClosure.num_dirty_cards());
5230       }
5231     }
5232   }
5233   if (VerifyDuringGC &&
5234       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5235     HandleMark hm;  // Discard invalid handles created during verification
5236     Universe::verify();
5237   }
5238   {
5239     GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5240 
5241     verify_work_stacks_empty();
5242 
5243     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5244     GenCollectedHeap::StrongRootsScope srs;
5245 
5246     gch->gen_process_roots(_cmsGen->level(),
5247                            true,  // younger gens as roots
5248                            false, // use the local StrongRootsScope
5249                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
5250                            should_unload_classes(),
5251                            &mrias_cl,
5252                            NULL,
5253                            NULL); // The dirty klasses will be handled below
5254 
5255     assert(should_unload_classes()
5256            || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
5257            "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5258   }
5259 
5260   {
5261     GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5262 
5263     verify_work_stacks_empty();
5264 

