< prev index next >

src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp

Print this page




2999 
3000   // Whenever a CLD is found, it will be claimed before proceeding to mark
3001   // the klasses. The claimed marks need to be cleared before marking starts.
3002   ClassLoaderDataGraph::clear_claimed_marks();
3003 
3004   if (CMSPrintEdenSurvivorChunks) {
3005     print_eden_and_survivor_chunk_arrays();
3006   }
3007 
3008   {
3009     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3010     if (CMSParallelInitialMarkEnabled) {
3011       // The parallel version.
3012       FlexibleWorkGang* workers = gch->workers();
3013       assert(workers != NULL, "Need parallel worker threads.");
3014       uint n_workers = workers->active_workers();
3015 
3016       StrongRootsScope srs(n_workers);
3017 
3018       CMSParInitialMarkTask tsk(this, &srs, n_workers);
3019       gch->set_par_threads(n_workers);
3020       initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
3021       if (n_workers > 1) {
3022         workers->run_task(&tsk);
3023       } else {
3024         tsk.work(0);
3025       }
3026       gch->set_par_threads(0);
3027     } else {
3028       // The serial version.
3029       CLDToOopClosure cld_closure(&notOlder, true);
3030       gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3031 
3032       StrongRootsScope srs(1);
3033 
3034       gch->gen_process_roots(&srs,
3035                              _cmsGen->level(),
3036                              true,   // younger gens are roots
3037                              GenCollectedHeap::ScanningOption(roots_scanning_options()),
3038                              should_unload_classes(),
3039                              &notOlder,
3040                              NULL,
3041                              &cld_closure);
3042     }
3043   }
3044 
3045   // Clear mod-union table; it will be dirtied in the prologue of
3046   // CMS generation per each younger generation collection.


5070 // Parallel version of remark
5071 void CMSCollector::do_remark_parallel() {
5072   GenCollectedHeap* gch = GenCollectedHeap::heap();
5073   FlexibleWorkGang* workers = gch->workers();
5074   assert(workers != NULL, "Need parallel worker threads.");
5075   // Choose to use the number of GC workers most recently set
5076   // into "active_workers".  If active_workers is not set, set it
5077   // to ParallelGCThreads.
5078   uint n_workers = workers->active_workers();
5079   if (n_workers == 0) {
       // Debug builds assert if this branch is ever reached; product builds
       // recover by falling back to ParallelGCThreads.
5080     assert(n_workers > 0, "Should have been set during scavenge");
5081     n_workers = ParallelGCThreads;
5082     workers->set_active_workers(n_workers);
5083   }
5084   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
5085 
       // Scoped root-scanning context sized for n_workers threads; it is
       // passed explicitly to the remark task constructed below.
5086   StrongRootsScope srs(n_workers);
5087 
5088   CMSParRemarkTask tsk(this, cms_space, n_workers, workers, task_queues(), &srs);
5089 
5090   // Set up for parallel process_roots work.
5091   gch->set_par_threads(n_workers);
5092   // We won't be iterating over the cards in the card table updating
5093   // the younger_gen cards, so we shouldn't call the following else
5094   // the verification code as well as subsequent younger_refs_iterate
5095   // code would get confused. XXX
5096   // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
5097 
5098   // The young gen rescan work will not be done as part of
5099   // process_roots (which currently doesn't know how to
5100   // parallelize such a scan), but rather will be broken up into
5101   // a set of parallel tasks (via the sampling that the [abortable]
5102   // preclean phase did of eden, plus the [two] tasks of
5103   // scanning the [two] survivor spaces. Further fine-grain
5104   // parallelization of the scanning of the survivor spaces
5105   // themselves, and of precleaning of the younger gen itself
5106   // is deferred to the future.
5107   initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
5108 
5109   // The dirty card rescan work is broken up into a "sequence"
5110   // of parallel tasks (per constituent space) that are dynamically
5111   // claimed by the parallel threads.
5112   cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
5113 
5114   // It turns out that even when we're using 1 thread, doing the work in a
5115   // separate thread causes wide variance in run times.  We can't help this
5116   // in the multi-threaded case, but we special-case n=1 here to get
5117   // repeatable measurements of the 1-thread overhead of the parallel code.
5118   if (n_workers > 1) {
5119     // Make refs discovery MT-safe, if it isn't already: it may not
5120     // necessarily be so, since it's possible that we are doing
5121     // ST marking.
5122     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
5123     workers->run_task(&tsk);
5124   } else {
       // Single worker: run the task body directly on this thread, with
       // MT reference discovery left disabled.
5125     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5126     tsk.work(0);
5127   }
5128 
5129   gch->set_par_threads(0);  // 0 ==> non-parallel.
5130   // restore, single-threaded for now, any preserved marks
5131   // as a result of work_q overflow
5132   restore_preserved_marks_if_any();
5133 }
5134 
5135 // Non-parallel version of remark
5136 void CMSCollector::do_remark_non_parallel() {
5137   ResourceMark rm;
5138   HandleMark   hm;
5139   GenCollectedHeap* gch = GenCollectedHeap::heap();
5140   ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5141 
5142   MarkRefsIntoAndScanClosure
5143     mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
5144              &_markStack, this,
5145              false /* should_yield */, false /* not precleaning */);
5146   MarkFromDirtyCardsClosure
5147     markFromDirtyCardsClosure(this, _span,
5148                               NULL,  // space is set further below
5149                               &_markBitMap, &_markStack, &mrias_cl);




2999 
3000   // Whenever a CLD is found, it will be claimed before proceeding to mark
3001   // the klasses. The claimed marks need to be cleared before marking starts.
3002   ClassLoaderDataGraph::clear_claimed_marks();
3003 
3004   if (CMSPrintEdenSurvivorChunks) {
3005     print_eden_and_survivor_chunk_arrays();
3006   }
3007 
3008   {
3009     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3010     if (CMSParallelInitialMarkEnabled) {
3011       // The parallel version.
3012       FlexibleWorkGang* workers = gch->workers();
3013       assert(workers != NULL, "Need parallel worker threads.");
3014       uint n_workers = workers->active_workers();
3015 
3016       StrongRootsScope srs(n_workers);
3017 
3018       CMSParInitialMarkTask tsk(this, &srs, n_workers);

3019       initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
3020       if (n_workers > 1) {
3021         workers->run_task(&tsk);
3022       } else {
3023         tsk.work(0);
3024       }

3025     } else {
3026       // The serial version.
3027       CLDToOopClosure cld_closure(&notOlder, true);
3028       gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3029 
3030       StrongRootsScope srs(1);
3031 
3032       gch->gen_process_roots(&srs,
3033                              _cmsGen->level(),
3034                              true,   // younger gens are roots
3035                              GenCollectedHeap::ScanningOption(roots_scanning_options()),
3036                              should_unload_classes(),
3037                              &notOlder,
3038                              NULL,
3039                              &cld_closure);
3040     }
3041   }
3042 
3043   // Clear mod-union table; it will be dirtied in the prologue of
3044   // CMS generation per each younger generation collection.


5068 // Parallel version of remark
5069 void CMSCollector::do_remark_parallel() {
5070   GenCollectedHeap* gch = GenCollectedHeap::heap();
5071   FlexibleWorkGang* workers = gch->workers();
5072   assert(workers != NULL, "Need parallel worker threads.");
5073   // Choose to use the number of GC workers most recently set
5074   // into "active_workers".  If active_workers is not set, set it
5075   // to ParallelGCThreads.
5076   uint n_workers = workers->active_workers();
5077   if (n_workers == 0) {
       // Debug builds assert if this branch is ever reached; product builds
       // recover by falling back to ParallelGCThreads.
5078     assert(n_workers > 0, "Should have been set during scavenge");
5079     n_workers = ParallelGCThreads;
5080     workers->set_active_workers(n_workers);
5081   }
5082   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
5083 
       // Scoped root-scanning context sized for n_workers threads; it is
       // passed explicitly to the remark task constructed below.
5084   StrongRootsScope srs(n_workers);
5085 
5086   CMSParRemarkTask tsk(this, cms_space, n_workers, workers, task_queues(), &srs);
5087 


5088   // We won't be iterating over the cards in the card table updating
5089   // the younger_gen cards, so we shouldn't call the following else
5090   // the verification code as well as subsequent younger_refs_iterate
5091   // code would get confused. XXX
5092   // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
5093 
5094   // The young gen rescan work will not be done as part of
5095   // process_roots (which currently doesn't know how to
5096   // parallelize such a scan), but rather will be broken up into
5097   // a set of parallel tasks (via the sampling that the [abortable]
5098   // preclean phase did of eden, plus the [two] tasks of
5099   // scanning the [two] survivor spaces. Further fine-grain
5100   // parallelization of the scanning of the survivor spaces
5101   // themselves, and of precleaning of the younger gen itself
5102   // is deferred to the future.
5103   initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
5104 
5105   // The dirty card rescan work is broken up into a "sequence"
5106   // of parallel tasks (per constituent space) that are dynamically
5107   // claimed by the parallel threads.
5108   cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
5109 
5110   // It turns out that even when we're using 1 thread, doing the work in a
5111   // separate thread causes wide variance in run times.  We can't help this
5112   // in the multi-threaded case, but we special-case n=1 here to get
5113   // repeatable measurements of the 1-thread overhead of the parallel code.
5114   if (n_workers > 1) {
5115     // Make refs discovery MT-safe, if it isn't already: it may not
5116     // necessarily be so, since it's possible that we are doing
5117     // ST marking.
5118     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
5119     workers->run_task(&tsk);
5120   } else {
       // Single worker: run the task body directly on this thread, with
       // MT reference discovery left disabled.
5121     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5122     tsk.work(0);
5123   }
5124 

5125   // restore, single-threaded for now, any preserved marks
5126   // as a result of work_q overflow
5127   restore_preserved_marks_if_any();
5128 }
5129 
5130 // Non-parallel version of remark
5131 void CMSCollector::do_remark_non_parallel() {
5132   ResourceMark rm;
5133   HandleMark   hm;
5134   GenCollectedHeap* gch = GenCollectedHeap::heap();
5135   ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5136 
5137   MarkRefsIntoAndScanClosure
5138     mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
5139              &_markStack, this,
5140              false /* should_yield */, false /* not precleaning */);
5141   MarkFromDirtyCardsClosure
5142     markFromDirtyCardsClosure(this, _span,
5143                               NULL,  // space is set further below
5144                               &_markBitMap, &_markStack, &mrias_cl);


< prev index next >