src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp


--- old/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp

2993 
2994   // weak reference processing has not started yet.
2995   ref_processor()->set_enqueuing_is_done(false);
2996 
2997   // Need to remember all newly created CLDs,
2998   // so that we can guarantee that the remark finds them.
2999   ClassLoaderDataGraph::remember_new_clds(true);
3000 
3001   // Whenever a CLD is found, it will be claimed before proceeding to mark
3002   // the klasses. The claimed marks need to be cleared before marking starts.
3003   ClassLoaderDataGraph::clear_claimed_marks();
3004 
3005   if (CMSPrintEdenSurvivorChunks) {
3006     print_eden_and_survivor_chunk_arrays();
3007   }
3008 
3009   {
3010     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3011     if (CMSParallelInitialMarkEnabled) {
3012       // The parallel version.
3013       FlexibleWorkGang* workers = gch->workers();
3014       assert(workers != NULL, "Need parallel worker threads.");
3015       uint n_workers = workers->active_workers();
3016 
3017       StrongRootsScope srs(n_workers);
3018 
3019       CMSParInitialMarkTask tsk(this, &srs, n_workers);
3020       initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
3021       if (n_workers > 1) {
3022         workers->run_task(&tsk);
3023       } else {
3024         tsk.work(0);
3025       }
3026     } else {
3027       // The serial version.
3028       CLDToOopClosure cld_closure(&notOlder, true);
3029       gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3030 
3031       StrongRootsScope srs(1);
3032 
3033       gch->gen_process_roots(&srs,
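
The comments at 2997-3003 rely on a claim protocol: each CLD is claimed exactly once before its klasses are marked, and those claim bits must be reset before every marking cycle or marking through class loaders would silently be skipped. A minimal stand-alone sketch of that idiom, with illustrative names rather than the ClassLoaderData API:

    #include <atomic>
    #include <cstddef>

    struct CLDNode {
      std::atomic<int> claimed;
      CLDNode*         next;
      CLDNode() : claimed(0), next(NULL) {}
    };

    // Only the first thread to flip 0 -> 1 marks this CLD's klasses, so
    // concurrent root scanners never process the same CLD twice.
    bool try_claim(CLDNode* cld) {
      int expected = 0;
      return cld->claimed.compare_exchange_strong(expected, 1);
    }

    // Mirrors ClassLoaderDataGraph::clear_claimed_marks() above: without
    // the reset, every CLD would still look claimed in the next cycle.
    void clear_claimed_marks(CLDNode* head) {
      for (CLDNode* c = head; c != NULL; c = c->next) {
        c->claimed.store(0);
      }
    }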


4479     gclog_or_tty->print_cr(
4480       "Finished remaining root initial mark scan work in %dth thread: %3.3f sec",
4481       worker_id, _timer.seconds());
4482   }
4483 }
4484 
4485 // Parallel remark task
4486 class CMSParRemarkTask: public CMSParMarkTask {
4487   CompactibleFreeListSpace* _cms_space;
4488 
4489   // The per-thread work queues, available here for stealing.
4490   OopTaskQueueSet*       _task_queues;
4491   ParallelTaskTerminator _term;
4492   StrongRootsScope*      _strong_roots_scope;
4493 
4494  public:
4495   // A value of 0 passed to n_workers will cause the number of
4496   // workers to be taken from the active workers in the work gang.
4497   CMSParRemarkTask(CMSCollector* collector,
4498                    CompactibleFreeListSpace* cms_space,
4499                    uint n_workers, FlexibleWorkGang* workers,
4500                    OopTaskQueueSet* task_queues,
4501                    StrongRootsScope* strong_roots_scope):
4502     CMSParMarkTask("Rescan roots and grey objects in parallel",
4503                    collector, n_workers),
4504     _cms_space(cms_space),
4505     _task_queues(task_queues),
4506     _term(n_workers, task_queues),
4507     _strong_roots_scope(strong_roots_scope) { }
4508 
4509   OopTaskQueueSet* task_queues() { return _task_queues; }
4510 
4511   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
4512 
4513   ParallelTaskTerminator* terminator() { return &_term; }
4514   uint n_workers() { return _n_workers; }
4515 
4516   void work(uint worker_id);
4517 
4518  private:
4519   // ... of  dirty cards in old space
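
The _task_queues member above is the hook for work stealing: each worker owns the queue returned by work_queue(i), while the whole set stays visible so idle workers can steal from peers. A lock-based stand-in for the idea follows; the real OopTaskQueue is a lock-free bounded deque, so take this as a sketch of the contract, not the implementation:

    #include <deque>
    #include <mutex>

    template <typename T>
    class StealableQueueSketch {
      std::deque<T> _q;
      std::mutex    _m;
     public:
      void push(const T& t) {
        std::lock_guard<std::mutex> g(_m);
        _q.push_back(t);
      }
      bool pop_local(T& t) {   // owner takes newest work (LIFO, cache-warm)
        std::lock_guard<std::mutex> g(_m);
        if (_q.empty()) return false;
        t = _q.back(); _q.pop_back(); return true;
      }
      bool steal(T& t) {       // thief takes oldest work from the far end
        std::lock_guard<std::mutex> g(_m);
        if (_q.empty()) return false;
        t = _q.front(); _q.pop_front(); return true;
      }
    };

Owners pop LIFO for locality; thieves take FIFO from the opposite end, which keeps owner and thief from contending on the same element.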


5052     assert(pst->valid(), "Error");
5053   }
5054 
5055   // From space
5056   {
5057     SequentialSubTasksDone* pst = _young_gen->from()->par_seq_tasks();
5058     assert(!pst->valid(), "Clobbering existing data?");
5059     size_t n_tasks = _survivor_chunk_index + 1;
5060     assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
5061     // Sets the condition for completion of the subtask (how many threads
5062     // need to finish in order to be done).
5063     pst->set_n_threads(n_threads);
5064     pst->set_n_tasks((int)n_tasks);
5065     assert(pst->valid(), "Error");
5066   }
5067 }
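
A hedged reading of the SequentialSubTasksDone protocol used in the blocks above: set_n_tasks() publishes how many chunks exist, workers claim chunks by atomically advancing a cursor, and the structure is complete once all n_threads participants have checked out. Simplified stand-in, not the HotSpot class:

    #include <atomic>

    class SeqSubTasksSketch {
      std::atomic<int> _next_task;
      std::atomic<int> _threads_done;
      int _n_tasks;
      int _n_threads;
     public:
      SeqSubTasksSketch(int n_tasks, int n_threads)
        : _next_task(0), _threads_done(0),
          _n_tasks(n_tasks), _n_threads(n_threads) {}

      // Every chunk index is handed out exactly once across all workers.
      bool try_claim(int& task) {
        task = _next_task.fetch_add(1);
        return task < _n_tasks;
      }

      // Called once per worker when it runs out of chunks; the last
      // arrival observes completion and could reset the structure.
      bool all_tasks_completed() {
        return _threads_done.fetch_add(1) + 1 == _n_threads;
      }
    };

Each worker then runs: int t; while (pst.try_claim(t)) scan_chunk(t); followed by pst.all_tasks_completed().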
5068 
5069 // Parallel version of remark
5070 void CMSCollector::do_remark_parallel() {
5071   GenCollectedHeap* gch = GenCollectedHeap::heap();
5072   FlexibleWorkGang* workers = gch->workers();
5073   assert(workers != NULL, "Need parallel worker threads.");
5074   // Choose to use the number of GC workers most recently set
5075   // into "active_workers".
5076   uint n_workers = workers->active_workers();
5077 
5078   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
5079 
5080   StrongRootsScope srs(n_workers);
5081 
5082   CMSParRemarkTask tsk(this, cms_space, n_workers, workers, task_queues(), &srs);
5083 
5084   // We won't be iterating over the cards in the card table updating
5085   // the younger_gen cards, so we shouldn't call the following else
5086   // the verification code as well as subsequent younger_refs_iterate
5087   // code would get confused. XXX
5088   // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
5089 
5090   // The young gen rescan work will not be done as part of
5091   // process_roots (which currently doesn't know how to
5092   // parallelize such a scan), but rather will be broken up into
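
The comment cut off above refers to the chunked young-gen rescan: eden and the survivor spaces are pre-split at sampled object boundaries (the _eden_chunk_array / _survivor_chunk_array that CMSPrintEdenSurvivorChunks dumps), and each chunk becomes one claimable task. A simplified sketch of the splitting, deliberately ignoring the object-boundary constraint that makes the real sampling necessary:

    #include <cstddef>
    #include <vector>

    struct ChunkSketch {
      char* start;
      char* end;
    };

    // Cut [bottom, top) into n_tasks roughly equal chunks. The collector
    // cannot cut blindly like this: boundaries must land on object starts,
    // so it samples the eden/survivor top pointers during allocation and
    // records them in the chunk arrays instead.
    std::vector<ChunkSketch> split_space(char* bottom, char* top,
                                         std::size_t n_tasks) {
      std::vector<ChunkSketch> chunks;
      std::size_t step = static_cast<std::size_t>(top - bottom) / n_tasks;
      for (std::size_t i = 0; i < n_tasks; i++) {
        char* s = bottom + i * step;
        char* e = (i + 1 == n_tasks) ? top : s + step;
        ChunkSketch c = { s, e };
        chunks.push_back(c);
      }
      return chunks;
    }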


5227     verify_work_stacks_empty();
5228   }
5229 
5230   // We might have added oops to ClassLoaderData::_handles during the
5231   // concurrent marking phase. These oops point to newly allocated objects
5232   // that are guaranteed to be kept alive. Either by the direct allocation
5233   // code, or when the young collector processes the roots. Hence,
5234   // we don't have to revisit the _handles block during the remark phase.
5235 
5236   verify_work_stacks_empty();
5237   // Restore evacuated mark words, if any, used for overflow list links
5238   if (!CMSOverflowEarlyRestoration) {
5239     restore_preserved_marks_if_any();
5240   }
5241   verify_overflow_empty();
5242 }
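
The "restore evacuated mark words" step at 5237-5240 exists because CMS threads its overflow list directly through object headers: when a header word is displaced to serve as a list link, the original word is saved on the side and written back before the pause ends (or earlier, when CMSOverflowEarlyRestoration is set). A self-contained sketch of the trick, with stand-in types rather than oopDesc:

    #include <cstddef>
    #include <cstdint>
    #include <utility>
    #include <vector>

    struct ObjSketch { uintptr_t mark; };  // stand-in for the header word

    static std::vector<std::pair<ObjSketch*, uintptr_t> > preserved_marks;

    // Push obj on an intrusive overflow list by reusing its header as the
    // link, remembering the displaced header for later restoration.
    void push_on_overflow_list(ObjSketch* obj, ObjSketch*& overflow_head) {
      preserved_marks.push_back(std::make_pair(obj, obj->mark));
      obj->mark = reinterpret_cast<uintptr_t>(overflow_head);
      overflow_head = obj;
    }

    // Counterpart of restore_preserved_marks_if_any(): once the list has
    // been drained, every displaced header goes back where it came from.
    void restore_preserved_marks_sketch() {
      for (std::size_t i = 0; i < preserved_marks.size(); i++) {
        preserved_marks[i].first->mark = preserved_marks[i].second;
      }
      preserved_marks.clear();
    }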
5243 
5244 ////////////////////////////////////////////////////////
5245 // Parallel Reference Processing Task Proxy Class
5246 ////////////////////////////////////////////////////////
5247 class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
5248   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5249   CMSCollector*          _collector;
5250   CMSBitMap*             _mark_bit_map;
5251   const MemRegion        _span;
5252   ProcessTask&           _task;
5253 
5254 public:
5255   CMSRefProcTaskProxy(ProcessTask&     task,
5256                       CMSCollector*    collector,
5257                       const MemRegion& span,
5258                       CMSBitMap*       mark_bit_map,
5259                       AbstractWorkGang* workers,
5260                       OopTaskQueueSet* task_queues):
5261     AbstractGangTaskWOopQueues("Process referents by policy in parallel",
5262       task_queues,
5263       workers->active_workers()),
5264     _task(task),
5265     _collector(collector), _span(span), _mark_bit_map(mark_bit_map)
5266   {
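
CMSRefProcTaskProxy is the adapter half of an inversion: ReferenceProcessor knows what to do per discovered reference but not how this collector runs threads, so it exports an abstract ProcessTask and the collector wraps it in a gang task carrying its own context (span, bit map, queues). The shape of that adapter, reduced to a stand-alone sketch with illustrative names:

    // What the reference-processing module exports (cf. ProcessTask).
    struct RefTaskSketch {
      virtual void work(unsigned worker_id) = 0;
      virtual ~RefTaskSketch() {}
    };

    // What a work gang knows how to run (cf. AbstractGangTask).
    struct GangTaskSketch {
      virtual void work(unsigned worker_id) = 0;
      virtual ~GangTaskSketch() {}
    };

    // The proxy adapts one interface to the other, adding any GC-local
    // state the workers will need when the task runs.
    struct RefProcTaskProxySketch : public GangTaskSketch {
      RefTaskSketch& _task;
      explicit RefProcTaskProxySketch(RefTaskSketch& t) : _task(t) {}
      virtual void work(unsigned worker_id) {
        // ... install per-worker closures here, then delegate ...
        _task.work(worker_id);
      }
    };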


5363       NOT_PRODUCT(num_steals++;)
5364       assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5365       assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5366       // Do scanning work
5367       obj_to_scan->oop_iterate(keep_alive);
5368       // Loop around, finish this work, and try to steal some more
5369     } else if (terminator()->offer_termination()) {
5370       break;  // nirvana from the infinite cycle
5371     }
5372   }
5373   NOT_PRODUCT(
5374     if (PrintCMSStatistics != 0) {
5375       gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5376     }
5377   )
5378 }
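
The offer_termination() call that ends the steal loop above is a consensus protocol: a worker that finds nothing to steal offers to quit, and the task finishes only when every worker is offering at the same time. A deliberately simplified sketch; the real ParallelTaskTerminator also spins, yields and sleeps between checks instead of busy-waiting:

    #include <atomic>

    class TerminatorSketch {
      std::atomic<int> _offered;
      const int        _n_workers;
     public:
      explicit TerminatorSketch(int n) : _offered(0), _n_workers(n) {}

      // work_available is any callable probing the queues. True return
      // means global termination; false means "the offer was retracted
      // because work appeared -- go steal again".
      template <typename Peek>
      bool offer_termination(Peek work_available) {
        _offered.fetch_add(1);
        while (true) {
          if (_offered.load() == _n_workers) {
            return true;            // every worker idle at once: done
          }
          if (work_available()) {
            _offered.fetch_sub(1);  // retract and resume stealing
            return false;
          }
        }
      }
    };

A worker would call term.offer_termination([&]{ return any_queue_nonempty(); }) in place of the bare call in the loop above.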
5379 
5380 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
5381 {
5382   GenCollectedHeap* gch = GenCollectedHeap::heap();
5383   FlexibleWorkGang* workers = gch->workers();
5384   assert(workers != NULL, "Need parallel worker threads.");
5385   CMSRefProcTaskProxy rp_task(task, &_collector,
5386                               _collector.ref_processor()->span(),
5387                               _collector.markBitMap(),
5388                               workers, _collector.task_queues());
5389   workers->run_task(&rp_task);
5390 }
5391 
5392 void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
5393 {
5394 
5395   GenCollectedHeap* gch = GenCollectedHeap::heap();
5396   FlexibleWorkGang* workers = gch->workers();
5397   assert(workers != NULL, "Need parallel worker threads.");
5398   CMSRefEnqueueTaskProxy enq_task(task);
5399   workers->run_task(&enq_task);
5400 }
5401 
5402 void CMSCollector::refProcessingWork() {
5403   ResourceMark rm;
5404   HandleMark   hm;
5405 
5406   ReferenceProcessor* rp = ref_processor();
5407   assert(rp->span().equals(_span), "Spans should be equal");
5408   assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
5409   // Process weak references.
5410   rp->setup_policy(false);
5411   verify_work_stacks_empty();
5412 
5413   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5414                                           &_markStack, false /* !preclean */);
5415   CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5416                                 _span, &_markBitMap, &_markStack,
5417                                 &cmsKeepAliveClosure, false /* !preclean */);
5418   {
5419     GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5420 
5421     ReferenceProcessorStats stats;
5422     if (rp->processing_is_mt()) {
5423       // Set the degree of MT here.  If the discovery is done MT, there
5424       // may have been a different number of threads doing the discovery
5425       // and a different number of discovered lists may have Ref objects.
5426       // That is OK as long as the Reference lists are balanced (see
5427       // balance_all_queues() and balance_queues()).
5428       GenCollectedHeap* gch = GenCollectedHeap::heap();
5429       uint active_workers = ParallelGCThreads;
5430       FlexibleWorkGang* workers = gch->workers();
5431       if (workers != NULL) {
5432         active_workers = workers->active_workers();
5433         // The expectation is that active_workers will have already
5434         // been set to a reasonable value.  If it has not been set,
5435         // investigate.
5436         assert(active_workers > 0, "Should have been set during scavenge");
5437       }
5438       rp->set_active_mt_degree(active_workers);
5439       CMSRefProcTaskExecutor task_executor(*this);
5440       stats = rp->process_discovered_references(&_is_alive_closure,
5441                                         &cmsKeepAliveClosure,
5442                                         &cmsDrainMarkingStackClosure,
5443                                         &task_executor,
5444                                         _gc_timer_cm,
5445                                         _gc_tracer_cm->gc_id());
5446     } else {
5447       stats = rp->process_discovered_references(&_is_alive_closure,
5448                                         &cmsKeepAliveClosure,
5449                                         &cmsDrainMarkingStackClosure,
5450                                         NULL,


+++ new/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp

2993 
2994   // weak reference processing has not started yet.
2995   ref_processor()->set_enqueuing_is_done(false);
2996 
2997   // Need to remember all newly created CLDs,
2998   // so that we can guarantee that the remark finds them.
2999   ClassLoaderDataGraph::remember_new_clds(true);
3000 
3001   // Whenever a CLD is found, it will be claimed before proceeding to mark
3002   // the klasses. The claimed marks need to be cleared before marking starts.
3003   ClassLoaderDataGraph::clear_claimed_marks();
3004 
3005   if (CMSPrintEdenSurvivorChunks) {
3006     print_eden_and_survivor_chunk_arrays();
3007   }
3008 
3009   {
3010     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3011     if (CMSParallelInitialMarkEnabled) {
3012       // The parallel version.
3013       WorkGang* workers = gch->workers();
3014       assert(workers != NULL, "Need parallel worker threads.");
3015       uint n_workers = workers->active_workers();
3016 
3017       StrongRootsScope srs(n_workers);
3018 
3019       CMSParInitialMarkTask tsk(this, &srs, n_workers);
3020       initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
3021       if (n_workers > 1) {
3022         workers->run_task(&tsk);
3023       } else {
3024         tsk.work(0);
3025       }
3026     } else {
3027       // The serial version.
3028       CLDToOopClosure cld_closure(&notOlder, true);
3029       gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3030 
3031       StrongRootsScope srs(1);
3032 
3033       gch->gen_process_roots(&srs,
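
Everything the serial path above does flows through closures: gen_process_roots walks the roots once and applies notOlder (a MarkRefsIntoClosure over the CMS span, constructed earlier in this function) to each discovered pointer. The pattern, reduced to a stand-alone sketch:

    #include <cstddef>

    // The abstract "apply me to every pointer" interface (cf. OopClosure).
    struct OopClosureSketch {
      virtual void do_oop(void** p) = 0;
      virtual ~OopClosureSketch() {}
    };

    // Phase-specific behavior: note a mark for anything inside the span.
    // Stand-in for MarkRefsIntoClosure; the bit map itself is elided.
    struct MarkRefsIntoSketch : public OopClosureSketch {
      char* _span_start;
      char* _span_end;
      std::size_t _marked;   // stands in for setting mark-bit-map bits
      MarkRefsIntoSketch(char* s, char* e)
        : _span_start(s), _span_end(e), _marked(0) {}
      virtual void do_oop(void** p) {
        char* obj = static_cast<char*>(*p);
        if (obj >= _span_start && obj < _span_end) {
          _marked++;         // _markBitMap.mark(obj) in the real collector
        }
      }
    };

    // Root walking is written once against the interface; initial mark,
    // remark and verification reuse it with different closures plugged in.
    void process_roots_sketch(void** roots, std::size_t n,
                              OopClosureSketch* cl) {
      for (std::size_t i = 0; i < n; i++) {
        cl->do_oop(&roots[i]);
      }
    }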


4479     gclog_or_tty->print_cr(
4480       "Finished remaining root initial mark scan work in %dth thread: %3.3f sec",
4481       worker_id, _timer.seconds());
4482   }
4483 }
4484 
4485 // Parallel remark task
4486 class CMSParRemarkTask: public CMSParMarkTask {
4487   CompactibleFreeListSpace* _cms_space;
4488 
4489   // The per-thread work queues, available here for stealing.
4490   OopTaskQueueSet*       _task_queues;
4491   ParallelTaskTerminator _term;
4492   StrongRootsScope*      _strong_roots_scope;
4493 
4494  public:
4495   // A value of 0 passed to n_workers will cause the number of
4496   // workers to be taken from the active workers in the work gang.
4497   CMSParRemarkTask(CMSCollector* collector,
4498                    CompactibleFreeListSpace* cms_space,
4499                    uint n_workers, WorkGang* workers,
4500                    OopTaskQueueSet* task_queues,
4501                    StrongRootsScope* strong_roots_scope):
4502     CMSParMarkTask("Rescan roots and grey objects in parallel",
4503                    collector, n_workers),
4504     _cms_space(cms_space),
4505     _task_queues(task_queues),
4506     _term(n_workers, task_queues),
4507     _strong_roots_scope(strong_roots_scope) { }
4508 
4509   OopTaskQueueSet* task_queues() { return _task_queues; }
4510 
4511   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
4512 
4513   ParallelTaskTerminator* terminator() { return &_term; }
4514   uint n_workers() { return _n_workers; }
4515 
4516   void work(uint worker_id);
4517 
4518  private:
4519   // ... of  dirty cards in old space


5052     assert(pst->valid(), "Error");
5053   }
5054 
5055   // From space
5056   {
5057     SequentialSubTasksDone* pst = _young_gen->from()->par_seq_tasks();
5058     assert(!pst->valid(), "Clobbering existing data?");
5059     size_t n_tasks = _survivor_chunk_index + 1;
5060     assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
5061     // Sets the condition for completion of the subtask (how many threads
5062     // need to finish in order to be done).
5063     pst->set_n_threads(n_threads);
5064     pst->set_n_tasks((int)n_tasks);
5065     assert(pst->valid(), "Error");
5066   }
5067 }
5068 
5069 // Parallel version of remark
5070 void CMSCollector::do_remark_parallel() {
5071   GenCollectedHeap* gch = GenCollectedHeap::heap();
5072   WorkGang* workers = gch->workers();
5073   assert(workers != NULL, "Need parallel worker threads.");
5074   // Choose to use the number of GC workers most recently set
5075   // into "active_workers".
5076   uint n_workers = workers->active_workers();
5077 
5078   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
5079 
5080   StrongRootsScope srs(n_workers);
5081 
5082   CMSParRemarkTask tsk(this, cms_space, n_workers, workers, task_queues(), &srs);
5083 
5084   // We won't be iterating over the cards in the card table updating
5085   // the younger_gen cards, so we shouldn't call the following else
5086   // the verification code as well as subsequent younger_refs_iterate
5087   // code would get confused. XXX
5088   // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
5089 
5090   // The young gen rescan work will not be done as part of
5091   // process_roots (which currently doesn't know how to
5092   // parallelize such a scan), but rather will be broken up into


5227     verify_work_stacks_empty();
5228   }
5229 
5230   // We might have added oops to ClassLoaderData::_handles during the
5231   // concurrent marking phase. These oops point to newly allocated objects
5232   // that are guaranteed to be kept alive. Either by the direct allocation
5233   // code, or when the young collector processes the roots. Hence,
5234   // we don't have to revisit the _handles block during the remark phase.
5235 
5236   verify_work_stacks_empty();
5237   // Restore evacuated mark words, if any, used for overflow list links
5238   if (!CMSOverflowEarlyRestoration) {
5239     restore_preserved_marks_if_any();
5240   }
5241   verify_overflow_empty();
5242 }
5243 
5244 ////////////////////////////////////////////////////////
5245 // Parallel Reference Processing Task Proxy Class
5246 ////////////////////////////////////////////////////////
5247 class AbstractGangTaskWOopQueues : public AbstractGangTask {
5248   OopTaskQueueSet*       _queues;
5249   ParallelTaskTerminator _terminator;
5250  public:
5251   AbstractGangTaskWOopQueues(const char* name, OopTaskQueueSet* queues, uint n_threads) :
5252     AbstractGangTask(name), _queues(queues), _terminator(n_threads, _queues) {}
5253   ParallelTaskTerminator* terminator() { return &_terminator; }
5254   OopTaskQueueSet* queues() { return _queues; }
5255 };
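
This helper is new in this file (presumably relocated from the shared work-gang code, since CMS is its only client): it bundles the queue set with a terminator sized to the same thread count, so a subclass's work() can drain, steal and terminate without extra wiring. Note that the terminator is constructed with the queues themselves; before committing to terminate, an idle worker can peek at the queues, and pending work makes the offer fail. Extending the earlier termination sketch with that coupling (mock types, illustrative only):

    #include <atomic>

    struct PeekableQueuesSketch {
      std::atomic<int> _total_elems;          // sum of all queue sizes
      PeekableQueuesSketch() : _total_elems(0) {}
      bool any_nonempty() const { return _total_elems.load() > 0; }
    };

    class QueueAwareTerminatorSketch {
      std::atomic<int>            _offered;
      const int                   _n_workers;
      const PeekableQueuesSketch* _queues;
     public:
      QueueAwareTerminatorSketch(int n, const PeekableQueuesSketch* qs)
        : _offered(0), _n_workers(n), _queues(qs) {}

      bool offer_termination() {
        _offered.fetch_add(1);
        while (true) {
          if (_offered.load() == _n_workers) {
            return true;                      // unanimous: really done
          }
          if (_queues->any_nonempty()) {      // someone pushed new work
            _offered.fetch_sub(1);
            return false;                     // go steal it instead
          }
        }
      }
    };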
5256 
5257 class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
5258   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5259   CMSCollector*          _collector;
5260   CMSBitMap*             _mark_bit_map;
5261   const MemRegion        _span;
5262   ProcessTask&           _task;
5263 
5264 public:
5265   CMSRefProcTaskProxy(ProcessTask&     task,
5266                       CMSCollector*    collector,
5267                       const MemRegion& span,
5268                       CMSBitMap*       mark_bit_map,
5269                       AbstractWorkGang* workers,
5270                       OopTaskQueueSet* task_queues):
5271     AbstractGangTaskWOopQueues("Process referents by policy in parallel",
5272       task_queues,
5273       workers->active_workers()),
5274     _task(task),
5275     _collector(collector), _span(span), _mark_bit_map(mark_bit_map)
5276   {


5373       NOT_PRODUCT(num_steals++;)
5374       assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5375       assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5376       // Do scanning work
5377       obj_to_scan->oop_iterate(keep_alive);
5378       // Loop around, finish this work, and try to steal some more
5379     } else if (terminator()->offer_termination()) {
5380       break;  // nirvana from the infinite cycle
5381     }
5382   }
5383   NOT_PRODUCT(
5384     if (PrintCMSStatistics != 0) {
5385       gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5386     }
5387   )
5388 }
5389 
5390 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
5391 {
5392   GenCollectedHeap* gch = GenCollectedHeap::heap();
5393   WorkGang* workers = gch->workers();
5394   assert(workers != NULL, "Need parallel worker threads.");
5395   CMSRefProcTaskProxy rp_task(task, &_collector,
5396                               _collector.ref_processor()->span(),
5397                               _collector.markBitMap(),
5398                               workers, _collector.task_queues());
5399   workers->run_task(&rp_task);
5400 }
5401 
5402 void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
5403 {
5404 
5405   GenCollectedHeap* gch = GenCollectedHeap::heap();
5406   WorkGang* workers = gch->workers();
5407   assert(workers != NULL, "Need parallel worker threads.");
5408   CMSRefEnqueueTaskProxy enq_task(task);
5409   workers->run_task(&enq_task);
5410 }
5411 
5412 void CMSCollector::refProcessingWork() {
5413   ResourceMark rm;
5414   HandleMark   hm;
5415 
5416   ReferenceProcessor* rp = ref_processor();
5417   assert(rp->span().equals(_span), "Spans should be equal");
5418   assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
5419   // Process weak references.
5420   rp->setup_policy(false);
5421   verify_work_stacks_empty();
5422 
5423   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5424                                           &_markStack, false /* !preclean */);
5425   CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5426                                 _span, &_markBitMap, &_markStack,
5427                                 &cmsKeepAliveClosure, false /* !preclean */);
5428   {
5429     GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5430 
5431     ReferenceProcessorStats stats;
5432     if (rp->processing_is_mt()) {
5433       // Set the degree of MT here.  If the discovery is done MT, there
5434       // may have been a different number of threads doing the discovery
5435       // and a different number of discovered lists may have Ref objects.
5436       // That is OK as long as the Reference lists are balanced (see
5437       // balance_all_queues() and balance_queues()).
5438       GenCollectedHeap* gch = GenCollectedHeap::heap();
5439       uint active_workers = ParallelGCThreads;
5440       WorkGang* workers = gch->workers();
5441       if (workers != NULL) {
5442         active_workers = workers->active_workers();
5443         // The expectation is that active_workers will have already
5444         // been set to a reasonable value.  If it has not been set,
5445         // investigate.
5446         assert(active_workers > 0, "Should have been set during scavenge");
5447       }
5448       rp->set_active_mt_degree(active_workers);
5449       CMSRefProcTaskExecutor task_executor(*this);
5450       stats = rp->process_discovered_references(&_is_alive_closure,
5451                                         &cmsKeepAliveClosure,
5452                                         &cmsDrainMarkingStackClosure,
5453                                         &task_executor,
5454                                         _gc_timer_cm,
5455                                         _gc_tracer_cm->gc_id());
5456     } else {
5457       stats = rp->process_discovered_references(&_is_alive_closure,
5458                                         &cmsKeepAliveClosure,
5459                                         &cmsDrainMarkingStackClosure,
5460                                         NULL,

