
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp

2888     gclog_or_tty->gclog_stamp(_gc_id);
2889     gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
2890                  _collector->cmsGen()->short_name(),
2891                  _phase, _collector->timerValue(), _wallclock.seconds());
2892     if (_print_cr) {
2893       gclog_or_tty->cr();
2894     }
2895     if (PrintCMSStatistics != 0) {
2896       gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
2897                     _collector->yields());
2898     }
2899   }
2900 }
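For reference, the code above emits a line of the form [CMS-concurrent-mark: 0.015/0.028 secs] (phase timer seconds / wall-clock seconds; the values here are illustrative), prefixed by whatever time or date stamp gclog_stamp() writes, and optionally followed by the yield count when PrintCMSStatistics is enabled.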
2901 
2902 // CMS work
2903 
2904 // The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
2905 class CMSParMarkTask : public AbstractGangTask {
2906  protected:
2907   CMSCollector*     _collector;
2908   int               _n_workers;
2909   CMSParMarkTask(const char* name, CMSCollector* collector, int n_workers) :
2910       AbstractGangTask(name),
2911       _collector(collector),
2912       _n_workers(n_workers) {}
2913   // Work method in support of parallel rescan ... of young gen spaces
2914   void do_young_space_rescan(uint worker_id, OopsInGenClosure* cl,
2915                              ContiguousSpace* space,
2916                              HeapWord** chunk_array, size_t chunk_top);
2917   void work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl);
2918 };
2919 
2920 // Parallel initial mark task
2921 class CMSParInitialMarkTask: public CMSParMarkTask {
2922  public:
2923   CMSParInitialMarkTask(CMSCollector* collector, int n_workers) :
2924       CMSParMarkTask("Scan roots and young gen for initial mark in parallel",
2925                      collector, n_workers) {}
2926   void work(uint worker_id);
2927 };
2928 
2929 // Checkpoint the roots into this generation from outside
2930 // this generation. [Note this initial checkpoint need only
2931 // be approximate -- we'll do a catch up phase subsequently.]
2932 void CMSCollector::checkpointRootsInitial() {
2933   assert(_collectorState == InitialMarking, "Wrong collector state");
2934   check_correct_thread_executing();
2935   TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
2936 
2937   save_heap_summary();
2938   report_heap_summary(GCWhen::BeforeGC);
2939 
2940   ReferenceProcessor* rp = ref_processor();
2941   assert(_restart_addr == NULL, "Control point invariant");
2942   {
2943     // acquire locks for subsequent manipulations


2992   ref_processor()->set_enqueuing_is_done(false);
2993 
2994   // Need to remember all newly created CLDs,
2995   // so that we can guarantee that the remark finds them.
2996   ClassLoaderDataGraph::remember_new_clds(true);
2997 
2998   // Whenever a CLD is found, it will be claimed before proceeding to mark
2999   // the klasses. The claimed marks need to be cleared before marking starts.
3000   ClassLoaderDataGraph::clear_claimed_marks();
3001 
3002   if (CMSPrintEdenSurvivorChunks) {
3003     print_eden_and_survivor_chunk_arrays();
3004   }
3005 
3006   {
3007     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3008     if (CMSParallelInitialMarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
3009       // The parallel version.
3010       FlexibleWorkGang* workers = gch->workers();
3011       assert(workers != NULL, "Need parallel worker threads.");
3012       int n_workers = workers->active_workers();
3013       CMSParInitialMarkTask tsk(this, n_workers);
3014       gch->set_par_threads(n_workers);
3015       initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
3016       if (n_workers > 1) {
3017         StrongRootsScope srs;
3018         workers->run_task(&tsk);
3019       } else {
3020         StrongRootsScope srs;
3021         tsk.work(0);
3022       }
3023       gch->set_par_threads(0);
3024     } else {
3025       // The serial version.
3026       CLDToOopClosure cld_closure(&notOlder, true);
3027       gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3028       gch->gen_process_roots(_cmsGen->level(),
3029                              true,   // younger gens are roots
3030                              true,   // activate StrongRootsScope
3031                              GenCollectedHeap::ScanningOption(roots_scanning_options()),
3032                              should_unload_classes(),


3133     ParallelTaskTerminator(n_threads, queue_set),
3134     _collector(collector) { }
3135 
3136   void set_task(CMSConcMarkingTask* task) {
3137     _task = task;
3138   }
3139 };
3140 
3141 class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
3142   CMSConcMarkingTask* _task;
3143  public:
3144   bool should_exit_termination();
3145   void set_task(CMSConcMarkingTask* task) {
3146     _task = task;
3147   }
3148 };
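A note for readers: a TerminatorTerminator is consulted by ParallelTaskTerminator::offer_termination(TerminatorTerminator*) so that the termination spin can exit early; here should_exit_termination() lets the concurrent marking workers bail out of the spin in order to yield. A hedged sketch of the consuming side (shape only, not the exact HotSpot body):

    // Illustrative sketch: how a worker might use the terminator pair.
    // offer_termination() returns true only once all workers are idle;
    // the TerminatorTerminator argument lets the spin abort early so the
    // CMS task can yield to a foreground collection.
    if (_term.offer_termination(&_term_term)) {
      // all work queues drained; this marking round is complete
    } else {
      // termination aborted early, e.g. should_exit_termination() fired
    }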
3149 
3150 // MT Concurrent Marking Task
3151 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3152   CMSCollector* _collector;
3153   int           _n_workers;                  // requested/desired # workers
3154   bool          _result;
3155   CompactibleFreeListSpace*  _cms_space;
3156   char          _pad_front[64];   // padding to ...
3157   HeapWord*     _global_finger;   // ... avoid sharing cache line
3158   char          _pad_back[64];
3159   HeapWord*     _restart_addr;
3160 
3161   //  Exposed here for yielding support
3162   Mutex* const _bit_map_lock;
3163 
3164   // The per thread work queues, available here for stealing
3165   OopTaskQueueSet*  _task_queues;
3166 
3167   // Termination (and yielding) support
3168   CMSConcMarkingTerminator _term;
3169   CMSConcMarkingTerminatorTerminator _term_term;
3170 
3171  public:
3172   CMSConcMarkingTask(CMSCollector* collector,
3173                  CompactibleFreeListSpace* cms_space,


3179     _n_workers(0), _result(true),
3180     _task_queues(task_queues),
3181     _term(_n_workers, task_queues, _collector),
3182     _bit_map_lock(collector->bitMapLock())
3183   {
3184     _requested_size = _n_workers;
3185     _term.set_task(this);
3186     _term_term.set_task(this);
3187     _restart_addr = _global_finger = _cms_space->bottom();
3188   }
3189 
3190 
3191   OopTaskQueueSet* task_queues()  { return _task_queues; }
3192 
3193   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3194 
3195   HeapWord** global_finger_addr() { return &_global_finger; }
3196 
3197   CMSConcMarkingTerminator* terminator() { return &_term; }
3198 
3199   virtual void set_for_termination(int active_workers) {
3200     terminator()->reset_for_reuse(active_workers);
3201   }
3202 
3203   void work(uint worker_id);
3204   bool should_yield() {
3205     return    ConcurrentMarkSweepThread::should_yield()
3206            && !_collector->foregroundGCIsActive();
3207   }
3208 
3209   virtual void coordinator_yield();  // stuff done by coordinator
3210   bool result() { return _result; }
3211 
3212   void reset(HeapWord* ra) {
3213     assert(_global_finger >= _cms_space->end(),  "Postcondition of ::work(i)");
3214     _restart_addr = _global_finger = ra;
3215     _term.reset_for_reuse();
3216   }
3217 
3218   static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3219                                            OopTaskQueue* work_q);
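The _pad_front/_pad_back arrays above keep _global_finger on its own cache line, so concurrent updates to it do not false-share with neighboring fields, as the inline comments note. A minimal standalone sketch of the idiom (assuming 64-byte cache lines; PaddedCursor is a hypothetical name, not HotSpot code):

    #include <cstddef>

    // Hypothetical stand-in for a hot, concurrently-updated pointer field.
    struct PaddedCursor {
      char  _pad_front[64];  // assumed cache-line size
      void* _cursor;         // hot field, e.g. a global marking finger
      char  _pad_back[64];   // keeps the next object's fields off this line
    };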


3625   // should really use wait/notify, which is the recommended
3626   // way of doing this type of interaction. Additionally, we should
3627   // consolidate the eight methods that perform the yield operation,
3628   // which are almost identical, into one for better maintainability
3629   // and readability. See 6445193.
3630   //
3631   // Tony 2006.06.29
3632   for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
3633                    ConcurrentMarkSweepThread::should_yield() &&
3634                    !CMSCollector::foregroundGCIsActive(); ++i) {
3635     os::sleep(Thread::current(), 1, false);
3636   }
3637 
3638   ConcurrentMarkSweepThread::synchronize(true);
3639   _bit_map_lock->lock_without_safepoint_check();
3640   _collector->startTimer();
3641 }
3642 
3643 bool CMSCollector::do_marking_mt() {
3644   assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
3645   int num_workers = AdaptiveSizePolicy::calc_active_conc_workers(
3646                                        conc_workers()->total_workers(),
3647                                        conc_workers()->active_workers(),
3648                                        Threads::number_of_non_daemon_threads());
3649   conc_workers()->set_active_workers(num_workers);
3650 
3651   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
3652 
3653   CMSConcMarkingTask tsk(this,
3654                          cms_space,
3655                          conc_workers(),
3656                          task_queues());
3657 
3658   // Since the actual number of workers we get may be different
3659   // from the number we requested above, do we need to do anything different
3660   // below? In particular, maybe we need to subclass the SequentialSubTasksDone
3661   // class? XXX
3662   cms_space->initialize_sequential_subtasks_for_marking(num_workers);
3663 
3664   // Refs discovery is already non-atomic.
3665   assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");


4474   if (PrintCMSStatistics != 0) {
4475     gclog_or_tty->print_cr(
4476       "Finished remaining root initial mark scan work in thread %d: %3.3f sec",
4477       worker_id, _timer.seconds());
4478   }
4479 }
4480 
4481 // Parallel remark task
4482 class CMSParRemarkTask: public CMSParMarkTask {
4483   CompactibleFreeListSpace* _cms_space;
4484 
4485   // The per-thread work queues, available here for stealing.
4486   OopTaskQueueSet*       _task_queues;
4487   ParallelTaskTerminator _term;
4488 
4489  public:
4490   // A value of 0 passed to n_workers will cause the number of
4491   // workers to be taken from the active workers in the work gang.
4492   CMSParRemarkTask(CMSCollector* collector,
4493                    CompactibleFreeListSpace* cms_space,
4494                    int n_workers, FlexibleWorkGang* workers,
4495                    OopTaskQueueSet* task_queues):
4496     CMSParMarkTask("Rescan roots and grey objects in parallel",
4497                    collector, n_workers),
4498     _cms_space(cms_space),
4499     _task_queues(task_queues),
4500     _term(n_workers, task_queues) { }
4501 
4502   OopTaskQueueSet* task_queues() { return _task_queues; }
4503 
4504   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
4505 
4506   ParallelTaskTerminator* terminator() { return &_term; }
4507   int n_workers() { return _n_workers; }
4508 
4509   void work(uint worker_id);
4510 
4511  private:
4512   // ... of dirty cards in old space
4513   void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
4514                                   Par_MarkRefsIntoAndScanClosure* cl);
4515 
4516   // ... work stealing for the above
4517   void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
4518 };
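As context for do_work_steal() above: each worker drains its own queue, then steals from random victims, and terminates only once the ParallelTaskTerminator agrees that all queues are empty. A hedged sketch of that loop's shape (names as declared above; the exact HotSpot body differs in details such as queue trimming):

    // Illustrative only: general shape of a work-stealing drain loop.
    while (true) {
      oop obj_to_scan;
      if (work_q->pop_local(obj_to_scan)) {
        obj_to_scan->oop_iterate(cl);        // drain our own queue first
      } else if (task_queues()->steal(i, seed, obj_to_scan)) {
        obj_to_scan->oop_iterate(cl);        // then steal from a random victim
      } else if (terminator()->offer_termination()) {
        break;                               // all queues confirmed empty
      }
    }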
4519 
4520 class RemarkKlassClosure : public KlassClosure {
4521   KlassToOopClosure _cm_klass_closure;
4522  public:
4523   RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4524   void do_klass(Klass* k) {
4525     // Check if we have modified any oops in the Klass during the concurrent marking.
4526     if (k->has_accumulated_modified_oops()) {
4527       k->clear_accumulated_modified_oops();


5050     SequentialSubTasksDone* pst = _young_gen->from()->par_seq_tasks();
5051     assert(!pst->valid(), "Clobbering existing data?");
5052     size_t n_tasks = _survivor_chunk_index + 1;
5053     assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
5054     // Sets the condition for completion of the subtask (how many threads
5055     // need to finish in order to be done).
5056     pst->set_n_threads(n_threads);
5057     pst->set_n_tasks((int)n_tasks);
5058     assert(pst->valid(), "Error");
5059   }
5060 }
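For orientation: once set_n_threads() and set_n_tasks() have been called as above, each worker claims chunk indices from the SequentialSubTasksDone until none remain, so every chunk is processed exactly once. A hedged sketch of the consuming loop (process_chunk is a hypothetical helper):

    // Illustrative consumer of a SequentialSubTasksDone (pst):
    uint nth_task = 0;
    while (!pst->is_task_claimed(/* reference */ nth_task)) {
      process_chunk(nth_task);     // each index is claimed by one thread only
    }
    pst->all_tasks_completed();    // last thread out resets pst for reuse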
5061 
5062 // Parallel version of remark
5063 void CMSCollector::do_remark_parallel() {
5064   GenCollectedHeap* gch = GenCollectedHeap::heap();
5065   FlexibleWorkGang* workers = gch->workers();
5066   assert(workers != NULL, "Need parallel worker threads.");
5067   // Choose to use the number of GC workers most recently set
5068   // into "active_workers".  If active_workers is not set, set it
5069   // to ParallelGCThreads.
5070   int n_workers = workers->active_workers();
5071   if (n_workers == 0) {
5072     assert(n_workers > 0, "Should have been set during scavenge");
5073     n_workers = ParallelGCThreads;
5074     workers->set_active_workers(n_workers);
5075   }
5076   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
5077 
5078   CMSParRemarkTask tsk(this,
5079     cms_space,
5080     n_workers, workers, task_queues());
5081 
5082   // Set up for parallel process_roots work.
5083   gch->set_par_threads(n_workers);
5084   // We won't be iterating over the cards in the card table updating
5085   // the younger_gen cards, so we shouldn't call the following; otherwise
5086   // the verification code, as well as subsequent younger_refs_iterate
5087   // code, would get confused. XXX
5088   // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
5089 
5090   // The young gen rescan work will not be done as part of


5416   // Process weak references.
5417   rp->setup_policy(false);
5418   verify_work_stacks_empty();
5419 
5420   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5421                                           &_markStack, false /* !preclean */);
5422   CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5423                                 _span, &_markBitMap, &_markStack,
5424                                 &cmsKeepAliveClosure, false /* !preclean */);
5425   {
5426     GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5427 
5428     ReferenceProcessorStats stats;
5429     if (rp->processing_is_mt()) {
5430       // Set the degree of MT here.  If the discovery is done MT, there
5431       // may have been a different number of threads doing the discovery
5432       // and a different number of discovered lists may contain Ref objects.
5433       // That is OK as long as the Reference lists are balanced (see
5434       // balance_all_queues() and balance_queues()).
5435       GenCollectedHeap* gch = GenCollectedHeap::heap();
5436       int active_workers = ParallelGCThreads;
5437       FlexibleWorkGang* workers = gch->workers();
5438       if (workers != NULL) {
5439         active_workers = workers->active_workers();
5440         // The expectation is that active_workers will have already
5441         // been set to a reasonable value.  If it has not been set,
5442         // investigate.
5443         assert(active_workers > 0, "Should have been set during scavenge");
5444       }
5445       rp->set_active_mt_degree(active_workers);
5446       CMSRefProcTaskExecutor task_executor(*this);
5447       stats = rp->process_discovered_references(&_is_alive_closure,
5448                                         &cmsKeepAliveClosure,
5449                                         &cmsDrainMarkingStackClosure,
5450                                         &task_executor,
5451                                         _gc_timer_cm,
5452                                         _gc_tracer_cm->gc_id());
5453     } else {
5454       stats = rp->process_discovered_references(&_is_alive_closure,
5455                                         &cmsKeepAliveClosure,
5456                                         &cmsDrainMarkingStackClosure,




2888     gclog_or_tty->gclog_stamp(_gc_id);
2889     gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
2890                  _collector->cmsGen()->short_name(),
2891                  _phase, _collector->timerValue(), _wallclock.seconds());
2892     if (_print_cr) {
2893       gclog_or_tty->cr();
2894     }
2895     if (PrintCMSStatistics != 0) {
2896       gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
2897                     _collector->yields());
2898     }
2899   }
2900 }
2901 
2902 // CMS work
2903 
2904 // The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
2905 class CMSParMarkTask : public AbstractGangTask {
2906  protected:
2907   CMSCollector*     _collector;
2908   uint              _n_workers;
2909   CMSParMarkTask(const char* name, CMSCollector* collector, uint n_workers) :
2910       AbstractGangTask(name),
2911       _collector(collector),
2912       _n_workers(n_workers) {}
2913   // Work method in support of parallel rescan ... of young gen spaces
2914   void do_young_space_rescan(uint worker_id, OopsInGenClosure* cl,
2915                              ContiguousSpace* space,
2916                              HeapWord** chunk_array, size_t chunk_top);
2917   void work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl);
2918 };
2919 
2920 // Parallel initial mark task
2921 class CMSParInitialMarkTask: public CMSParMarkTask {
2922  public:
2923   CMSParInitialMarkTask(CMSCollector* collector, uint n_workers) :
2924       CMSParMarkTask("Scan roots and young gen for initial mark in parallel",
2925                      collector, n_workers) {}
2926   void work(uint worker_id);
2927 };
2928 
2929 // Checkpoint the roots into this generation from outside
2930 // this generation. [Note this initial checkpoint need only
2931 // be approximate -- we'll do a catch up phase subsequently.]
2932 void CMSCollector::checkpointRootsInitial() {
2933   assert(_collectorState == InitialMarking, "Wrong collector state");
2934   check_correct_thread_executing();
2935   TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
2936 
2937   save_heap_summary();
2938   report_heap_summary(GCWhen::BeforeGC);
2939 
2940   ReferenceProcessor* rp = ref_processor();
2941   assert(_restart_addr == NULL, "Control point invariant");
2942   {
2943     // acquire locks for subsequent manipulations


2992   ref_processor()->set_enqueuing_is_done(false);
2993 
2994   // Need to remember all newly created CLDs,
2995   // so that we can guarantee that the remark finds them.
2996   ClassLoaderDataGraph::remember_new_clds(true);
2997 
2998   // Whenever a CLD is found, it will be claimed before proceeding to mark
2999   // the klasses. The claimed marks need to be cleared before marking starts.
3000   ClassLoaderDataGraph::clear_claimed_marks();
3001 
3002   if (CMSPrintEdenSurvivorChunks) {
3003     print_eden_and_survivor_chunk_arrays();
3004   }
3005 
3006   {
3007     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3008     if (CMSParallelInitialMarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
3009       // The parallel version.
3010       FlexibleWorkGang* workers = gch->workers();
3011       assert(workers != NULL, "Need parallel worker threads.");
3012       uint n_workers = workers->active_workers();
3013       CMSParInitialMarkTask tsk(this, n_workers);
3014       gch->set_par_threads(n_workers);
3015       initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
3016       if (n_workers > 1) {
3017         StrongRootsScope srs;
3018         workers->run_task(&tsk);
3019       } else {
3020         StrongRootsScope srs;
3021         tsk.work(0);
3022       }
3023       gch->set_par_threads(0);
3024     } else {
3025       // The serial version.
3026       CLDToOopClosure cld_closure(&notOlder, true);
3027       gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3028       gch->gen_process_roots(_cmsGen->level(),
3029                              true,   // younger gens are roots
3030                              true,   // activate StrongRootsScope
3031                              GenCollectedHeap::ScanningOption(roots_scanning_options()),
3032                              should_unload_classes(),


3133     ParallelTaskTerminator(n_threads, queue_set),
3134     _collector(collector) { }
3135 
3136   void set_task(CMSConcMarkingTask* task) {
3137     _task = task;
3138   }
3139 };
3140 
3141 class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
3142   CMSConcMarkingTask* _task;
3143  public:
3144   bool should_exit_termination();
3145   void set_task(CMSConcMarkingTask* task) {
3146     _task = task;
3147   }
3148 };
3149 
3150 // MT Concurrent Marking Task
3151 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3152   CMSCollector* _collector;
3153   uint          _n_workers;                  // requested/desired # workers
3154   bool          _result;
3155   CompactibleFreeListSpace*  _cms_space;
3156   char          _pad_front[64];   // padding to ...
3157   HeapWord*     _global_finger;   // ... avoid sharing cache line
3158   char          _pad_back[64];
3159   HeapWord*     _restart_addr;
3160 
3161   //  Exposed here for yielding support
3162   Mutex* const _bit_map_lock;
3163 
3164   // The per thread work queues, available here for stealing
3165   OopTaskQueueSet*  _task_queues;
3166 
3167   // Termination (and yielding) support
3168   CMSConcMarkingTerminator _term;
3169   CMSConcMarkingTerminatorTerminator _term_term;
3170 
3171  public:
3172   CMSConcMarkingTask(CMSCollector* collector,
3173                  CompactibleFreeListSpace* cms_space,


3179     _n_workers(0), _result(true),
3180     _task_queues(task_queues),
3181     _term(_n_workers, task_queues, _collector),
3182     _bit_map_lock(collector->bitMapLock())
3183   {
3184     _requested_size = _n_workers;
3185     _term.set_task(this);
3186     _term_term.set_task(this);
3187     _restart_addr = _global_finger = _cms_space->bottom();
3188   }
3189 
3190 
3191   OopTaskQueueSet* task_queues()  { return _task_queues; }
3192 
3193   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3194 
3195   HeapWord** global_finger_addr() { return &_global_finger; }
3196 
3197   CMSConcMarkingTerminator* terminator() { return &_term; }
3198 
3199   virtual void set_for_termination(uint active_workers) {
3200     terminator()->reset_for_reuse(active_workers);
3201   }
3202 
3203   void work(uint worker_id);
3204   bool should_yield() {
3205     return    ConcurrentMarkSweepThread::should_yield()
3206            && !_collector->foregroundGCIsActive();
3207   }
3208 
3209   virtual void coordinator_yield();  // stuff done by coordinator
3210   bool result() { return _result; }
3211 
3212   void reset(HeapWord* ra) {
3213     assert(_global_finger >= _cms_space->end(),  "Postcondition of ::work(i)");
3214     _restart_addr = _global_finger = ra;
3215     _term.reset_for_reuse();
3216   }
3217 
3218   static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3219                                            OopTaskQueue* work_q);


3625   // should really use wait/notify, which is the recommended
3626   // way of doing this type of interaction. Additionally, we should
3627   // consolidate the eight methods that perform the yield operation,
3628   // which are almost identical, into one for better maintainability
3629   // and readability. See 6445193.
3630   //
3631   // Tony 2006.06.29
3632   for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
3633                    ConcurrentMarkSweepThread::should_yield() &&
3634                    !CMSCollector::foregroundGCIsActive(); ++i) {
3635     os::sleep(Thread::current(), 1, false);
3636   }
3637 
3638   ConcurrentMarkSweepThread::synchronize(true);
3639   _bit_map_lock->lock_without_safepoint_check();
3640   _collector->startTimer();
3641 }
3642 
3643 bool CMSCollector::do_marking_mt() {
3644   assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
3645   uint num_workers = AdaptiveSizePolicy::calc_active_conc_workers(
3646                                         conc_workers()->total_workers(),
3647                                         conc_workers()->active_workers(),
3648                                         Threads::number_of_non_daemon_threads());
3649   conc_workers()->set_active_workers(num_workers);
3650 
3651   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
3652 
3653   CMSConcMarkingTask tsk(this,
3654                          cms_space,
3655                          conc_workers(),
3656                          task_queues());
3657 
3658   // Since the actual number of workers we get may be different
3659   // from the number we requested above, do we need to do anything different
3660   // below? In particular, maybe we need to subclass the SequentialSubTasksDone
3661   // class? XXX
3662   cms_space->initialize_sequential_subtasks_for_marking(num_workers);
3663 
3664   // Refs discovery is already non-atomic.
3665   assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");


4474   if (PrintCMSStatistics != 0) {
4475     gclog_or_tty->print_cr(
4476       "Finished remaining root initial mark scan work in thread %d: %3.3f sec",
4477       worker_id, _timer.seconds());
4478   }
4479 }
4480 
4481 // Parallel remark task
4482 class CMSParRemarkTask: public CMSParMarkTask {
4483   CompactibleFreeListSpace* _cms_space;
4484 
4485   // The per-thread work queues, available here for stealing.
4486   OopTaskQueueSet*       _task_queues;
4487   ParallelTaskTerminator _term;
4488 
4489  public:
4490   // A value of 0 passed to n_workers will cause the number of
4491   // workers to be taken from the active workers in the work gang.
4492   CMSParRemarkTask(CMSCollector* collector,
4493                    CompactibleFreeListSpace* cms_space,
4494                    uint n_workers, FlexibleWorkGang* workers,
4495                    OopTaskQueueSet* task_queues):
4496     CMSParMarkTask("Rescan roots and grey objects in parallel",
4497                    collector, n_workers),
4498     _cms_space(cms_space),
4499     _task_queues(task_queues),
4500     _term(n_workers, task_queues) { }
4501 
4502   OopTaskQueueSet* task_queues() { return _task_queues; }
4503 
4504   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
4505 
4506   ParallelTaskTerminator* terminator() { return &_term; }
4507   uint n_workers() { return _n_workers; }
4508 
4509   void work(uint worker_id);
4510 
4511  private:
4512   // ... of dirty cards in old space
4513   void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
4514                                   Par_MarkRefsIntoAndScanClosure* cl);
4515 
4516   // ... work stealing for the above
4517   void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
4518 };
4519 
4520 class RemarkKlassClosure : public KlassClosure {
4521   KlassToOopClosure _cm_klass_closure;
4522  public:
4523   RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4524   void do_klass(Klass* k) {
4525     // Check if we have modified any oops in the Klass during the concurrent marking.
4526     if (k->has_accumulated_modified_oops()) {
4527       k->clear_accumulated_modified_oops();


5050     SequentialSubTasksDone* pst = _young_gen->from()->par_seq_tasks();
5051     assert(!pst->valid(), "Clobbering existing data?");
5052     size_t n_tasks = _survivor_chunk_index + 1;
5053     assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
5054     // Sets the condition for completion of the subtask (how many threads
5055     // need to finish in order to be done).
5056     pst->set_n_threads(n_threads);
5057     pst->set_n_tasks((int)n_tasks);
5058     assert(pst->valid(), "Error");
5059   }
5060 }
5061 
5062 // Parallel version of remark
5063 void CMSCollector::do_remark_parallel() {
5064   GenCollectedHeap* gch = GenCollectedHeap::heap();
5065   FlexibleWorkGang* workers = gch->workers();
5066   assert(workers != NULL, "Need parallel worker threads.");
5067   // Choose to use the number of GC workers most recently set
5068   // into "active_workers".  If active_workers is not set, set it
5069   // to ParallelGCThreads.
5070   uint n_workers = workers->active_workers();
5071   if (n_workers == 0) {
5072     assert(n_workers > 0, "Should have been set during scavenge");
5073     n_workers = ParallelGCThreads;
5074     workers->set_active_workers(n_workers);
5075   }
5076   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
5077 
5078   CMSParRemarkTask tsk(this,
5079     cms_space,
5080     n_workers, workers, task_queues());
5081 
5082   // Set up for parallel process_roots work.
5083   gch->set_par_threads(n_workers);
5084   // We won't be iterating over the cards in the card table updating
5085   // the younger_gen cards, so we shouldn't call the following; otherwise
5086   // the verification code, as well as subsequent younger_refs_iterate
5087   // code, would get confused. XXX
5088   // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
5089 
5090   // The young gen rescan work will not be done as part of


5416   // Process weak references.
5417   rp->setup_policy(false);
5418   verify_work_stacks_empty();
5419 
5420   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5421                                           &_markStack, false /* !preclean */);
5422   CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5423                                 _span, &_markBitMap, &_markStack,
5424                                 &cmsKeepAliveClosure, false /* !preclean */);
5425   {
5426     GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5427 
5428     ReferenceProcessorStats stats;
5429     if (rp->processing_is_mt()) {
5430       // Set the degree of MT here.  If the discovery is done MT, there
5431       // may have been a different number of threads doing the discovery
5432       // and a different number of discovered lists may contain Ref objects.
5433       // That is OK as long as the Reference lists are balanced (see
5434       // balance_all_queues() and balance_queues()).
5435       GenCollectedHeap* gch = GenCollectedHeap::heap();
5436       uint active_workers = ParallelGCThreads;
5437       FlexibleWorkGang* workers = gch->workers();
5438       if (workers != NULL) {
5439         active_workers = workers->active_workers();
5440         // The expectation is that active_workers will have already
5441         // been set to a reasonable value.  If it has not been set,
5442         // investigate.
5443         assert(active_workers > 0, "Should have been set during scavenge");
5444       }
5445       rp->set_active_mt_degree(active_workers);
5446       CMSRefProcTaskExecutor task_executor(*this);
5447       stats = rp->process_discovered_references(&_is_alive_closure,
5448                                         &cmsKeepAliveClosure,
5449                                         &cmsDrainMarkingStackClosure,
5450                                         &task_executor,
5451                                         _gc_timer_cm,
5452                                         _gc_tracer_cm->gc_id());
5453     } else {
5454       stats = rp->process_discovered_references(&_is_alive_closure,
5455                                         &cmsKeepAliveClosure,
5456                                         &cmsDrainMarkingStackClosure,

