src/share/vm/gc_implementation/g1/concurrentMark.cpp

rev 8068 : 6407976: GC worker number should be unsigned
Reviewed-by: jwilhelm

--- old version (worker counts as int)

2303       // the beginning of CMTask::do_marking_step() for those conditions -
2304       // one of which is reaching the specified time target.) It is only
2305       // when CMTask::do_marking_step() returns without setting the
2306       // has_aborted() flag that the marking step has completed.
2307 
2308       _task->do_marking_step(1000000000.0 /* something very large */,
2309                              true         /* do_termination */,
2310                              _is_serial);
2311     } while (_task->has_aborted() && !_cm->has_overflown());
2312   }
2313 };
2314 
2315 // Implementation of AbstractRefProcTaskExecutor for parallel
2316 // reference processing at the end of G1 concurrent marking
2317 
2318 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
2319 private:
2320   G1CollectedHeap* _g1h;
2321   ConcurrentMark*  _cm;
2322   WorkGang*        _workers;
2323   int              _active_workers;
2324 
2325 public:
2326   G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
2327                         ConcurrentMark* cm,
2328                         WorkGang* workers,
2329                         int n_workers) :
2330     _g1h(g1h), _cm(cm),
2331     _workers(workers), _active_workers(n_workers) { }
2332 
2333   // Executes the given task using concurrent marking worker threads.
2334   virtual void execute(ProcessTask& task);
2335   virtual void execute(EnqueueTask& task);
2336 };
2337 
2338 class G1CMRefProcTaskProxy: public AbstractGangTask {
2339   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2340   ProcessTask&     _proc_task;
2341   G1CollectedHeap* _g1h;
2342   ConcurrentMark*  _cm;
2343 
2344 public:
2345   G1CMRefProcTaskProxy(ProcessTask& proc_task,
2346                      G1CollectedHeap* g1h,
2347                      ConcurrentMark* cm) :
2348     AbstractGangTask("Process reference objects in parallel"),
2349     _proc_task(proc_task), _g1h(g1h), _cm(cm) {


2619       task->record_start_time();
2620       {
2621         ResourceMark rm;
2622         HandleMark hm;
2623 
2624         G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
2625         Threads::threads_do(&threads_f);
2626       }
2627 
2628       do {
2629         task->do_marking_step(1000000000.0 /* something very large */,
2630                               true         /* do_termination       */,
2631                               false        /* is_serial            */);
2632       } while (task->has_aborted() && !_cm->has_overflown());
2633       // If we overflow, then we do not want to restart. We instead
2634       // want to abort remark and do concurrent marking again.
2635       task->record_end_time();
2636     }
2637   }
2638 
2639   CMRemarkTask(ConcurrentMark* cm, int active_workers) :
2640     AbstractGangTask("Par Remark"), _cm(cm) {
2641     _cm->terminator()->reset_for_reuse(active_workers);
2642   }
2643 };
2644 
2645 void ConcurrentMark::checkpointRootsFinalWork() {
2646   ResourceMark rm;
2647   HandleMark   hm;
2648   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2649 
2650   G1CMTraceTime trace("Finalize Marking", G1Log::finer());
2651 
2652   g1h->ensure_parsability(false);
2653 
2654   StrongRootsScope srs;
2655   // this is remark, so we'll use up all active threads
2656   uint active_workers = g1h->workers()->active_workers();
2657   if (active_workers == 0) {
2658     assert(active_workers > 0, "Should have been set earlier");
2659     active_workers = (uint) ParallelGCThreads;


3182         // passed in for left offset here.
3183         BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
3184         scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
3185       }
3186     }
3187 
3188     // Update the marked bytes for this region.
3189     hr->add_to_marked_bytes(marked_bytes);
3190 
3191     // Next heap region
3192     return false;
3193   }
3194 };
3195 
3196 class G1AggregateCountDataTask: public AbstractGangTask {
3197 protected:
3198   G1CollectedHeap* _g1h;
3199   ConcurrentMark* _cm;
3200   BitMap* _cm_card_bm;
3201   uint _max_worker_id;
3202   int _active_workers;
3203   HeapRegionClaimer _hrclaimer;
3204 
3205 public:
3206   G1AggregateCountDataTask(G1CollectedHeap* g1h,
3207                            ConcurrentMark* cm,
3208                            BitMap* cm_card_bm,
3209                            uint max_worker_id,
3210                            int n_workers) :
3211       AbstractGangTask("Count Aggregation"),
3212       _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
3213       _max_worker_id(max_worker_id),
3214       _active_workers(n_workers),
3215       _hrclaimer(_active_workers) {
3216   }
3217 
3218   void work(uint worker_id) {
3219     AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);
3220 
3221     _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer);
3222   }
3223 };
3224 
3225 
3226 void ConcurrentMark::aggregate_count_data() {
3227   int n_workers = _g1h->workers()->active_workers();
3228 
3229   G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
3230                                            _max_worker_id, n_workers);
3231 
3232   _g1h->set_par_threads(n_workers);
3233   _g1h->workers()->run_task(&g1_par_agg_task);
3234   _g1h->set_par_threads(0);
3235 }
3236 
3237 // Clear the per-worker arrays used to store the per-region counting data
3238 void ConcurrentMark::clear_all_count_data() {
3239   // Clear the global card bitmap - it will be filled during
3240   // liveness count aggregation (during remark) and the
3241   // final counting task.
3242   _card_bm.clear();
3243 
3244   // Clear the global region bitmap - it will be filled as part
3245   // of the final counting task.
3246   _region_bm.clear();
3247

+++ new version (worker counts as uint)

2303       // the beginning of CMTask::do_marking_step() for those conditions -
2304       // one of which is reaching the specified time target.) It is only
2305       // when CMTask::do_marking_step() returns without setting the
2306       // has_aborted() flag that the marking step has completed.
2307 
2308       _task->do_marking_step(1000000000.0 /* something very large */,
2309                              true         /* do_termination */,
2310                              _is_serial);
2311     } while (_task->has_aborted() && !_cm->has_overflown());
2312   }
2313 };
2314 
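A note on the loop above: the 1000000000.0 time target effectively disables do_marking_step()'s time-based abort, so the step only returns early for the other abort conditions (such as mark-stack overflow). The loop therefore retries until the step completes without aborting, or gives up once the global mark stack has overflown, in which case remark is abandoned and concurrent marking is redone.
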
2315 // Implementation of AbstractRefProcTaskExecutor for parallel
2316 // reference processing at the end of G1 concurrent marking
2317 
2318 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
2319 private:
2320   G1CollectedHeap* _g1h;
2321   ConcurrentMark*  _cm;
2322   WorkGang*        _workers;
2323   uint             _active_workers;
2324 
2325 public:
2326   G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
2327                         ConcurrentMark* cm,
2328                         WorkGang* workers,
2329                         uint n_workers) :
2330     _g1h(g1h), _cm(cm),
2331     _workers(workers), _active_workers(n_workers) { }
2332 
2333   // Executes the given task using concurrent marking worker threads.
2334   virtual void execute(ProcessTask& task);
2335   virtual void execute(EnqueueTask& task);
2336 };
2337 
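The bodies of the two execute() overloads fall outside this hunk. For orientation, here is a hedged sketch of how execute(ProcessTask&) is typically written in this file: wrap the task in the G1CMRefProcTaskProxy declared below and bracket the run with the unsigned worker count. The set_concurrency() call is an assumption based on the termination protocol described above, not code shown in this webrev.

    // Sketch only: assumed body of G1CMRefProcTaskExecutor::execute().
    void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
      assert(_workers != NULL, "Need parallel worker threads.");

      G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);

      // Assumed: do_marking_step()'s termination protocol has to be told
      // how many workers will take part before the gang is started.
      _cm->set_concurrency(_active_workers);

      _g1h->set_par_threads(_active_workers);
      _workers->run_task(&proc_task_proxy);
      _g1h->set_par_threads(0);
    }
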
2338 class G1CMRefProcTaskProxy: public AbstractGangTask {
2339   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2340   ProcessTask&     _proc_task;
2341   G1CollectedHeap* _g1h;
2342   ConcurrentMark*  _cm;
2343 
2344 public:
2345   G1CMRefProcTaskProxy(ProcessTask& proc_task,
2346                      G1CollectedHeap* g1h,
2347                      ConcurrentMark* cm) :
2348     AbstractGangTask("Process reference objects in parallel"),
2349     _proc_task(proc_task), _g1h(g1h), _cm(cm) {


2619       task->record_start_time();
2620       {
2621         ResourceMark rm;
2622         HandleMark hm;
2623 
2624         G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
2625         Threads::threads_do(&threads_f);
2626       }
2627 
2628       do {
2629         task->do_marking_step(1000000000.0 /* something very large */,
2630                               true         /* do_termination       */,
2631                               false        /* is_serial            */);
2632       } while (task->has_aborted() && !_cm->has_overflown());
2633       // If we overflow, then we do not want to restart. We instead
2634       // want to abort remark and do concurrent marking again.
2635       task->record_end_time();
2636     }
2637   }
2638 
2639   CMRemarkTask(ConcurrentMark* cm, uint active_workers) :
2640     AbstractGangTask("Par Remark"), _cm(cm) {
2641     _cm->terminator()->reset_for_reuse(active_workers);
2642   }
2643 };
2644 
2645 void ConcurrentMark::checkpointRootsFinalWork() {
2646   ResourceMark rm;
2647   HandleMark   hm;
2648   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2649 
2650   G1CMTraceTime trace("Finalize Marking", G1Log::finer());
2651 
2652   g1h->ensure_parsability(false);
2653 
2654   StrongRootsScope srs;
2655   // this is remark, so we'll use up all active threads
2656   uint active_workers = g1h->workers()->active_workers();
2657   if (active_workers == 0) {
2658     assert(active_workers > 0, "Should have been set earlier");
2659     active_workers = (uint) ParallelGCThreads;
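The hunk is cut off inside the zero-workers fallback. Further down, checkpointRootsFinalWork() presumably feeds the unsigned count straight into the remark task; a hedged sketch of that continuation (statements assumed, not shown in this webrev):

    // Assumed continuation: active_workers (uint) flows into the task
    // and the par-threads bracket without casts.
    CMRemarkTask remarkTask(this, active_workers);
    g1h->set_par_threads(active_workers);
    g1h->workers()->run_task(&remarkTask);
    g1h->set_par_threads(0);
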


3182         // passed in for left offset here.
3183         BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
3184         scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
3185       }
3186     }
3187 
3188     // Update the marked bytes for this region.
3189     hr->add_to_marked_bytes(marked_bytes);
3190 
3191     // Next heap region
3192     return false;
3193   }
3194 };
3195 
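The loop at lines 3183-3184 is the usual "find the next set bit, process it, resume one past it" scan over a BitMap, with MIN2 clamping the resume point to the limit. A self-contained illustration of the same pattern in plain C++ (hypothetical helper, not HotSpot's BitMap):

    #include <cstddef>
    #include <cstdint>

    // Hypothetical stand-in for BitMap::get_next_one_offset(): returns
    // the index of the first set bit in [beg, end), or end if none.
    static size_t get_next_one_offset(const uint64_t* bm, size_t beg, size_t end) {
      for (size_t i = beg; i < end; i++) {
        if (bm[i >> 6] & (uint64_t(1) << (i & 63))) return i;
      }
      return end;
    }

    // Visit every set bit in [start_idx, limit_idx).
    static void scan_set_bits(const uint64_t* bm, size_t start_idx, size_t limit_idx) {
      size_t scan_idx = get_next_one_offset(bm, start_idx, limit_idx);
      while (scan_idx < limit_idx) {
        // ... process the card at scan_idx, e.g. accumulate marked bytes ...
        size_t next_idx = (scan_idx + 1 < limit_idx) ? scan_idx + 1 : limit_idx;  // MIN2
        scan_idx = get_next_one_offset(bm, next_idx, limit_idx);
      }
    }
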
3196 class G1AggregateCountDataTask: public AbstractGangTask {
3197 protected:
3198   G1CollectedHeap* _g1h;
3199   ConcurrentMark* _cm;
3200   BitMap* _cm_card_bm;
3201   uint _max_worker_id;
3202   uint _active_workers;
3203   HeapRegionClaimer _hrclaimer;
3204 
3205 public:
3206   G1AggregateCountDataTask(G1CollectedHeap* g1h,
3207                            ConcurrentMark* cm,
3208                            BitMap* cm_card_bm,
3209                            uint max_worker_id,
3210                            uint n_workers) :
3211       AbstractGangTask("Count Aggregation"),
3212       _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
3213       _max_worker_id(max_worker_id),
3214       _active_workers(n_workers),
3215       _hrclaimer(_active_workers) {
3216   }
3217 
3218   void work(uint worker_id) {
3219     AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);
3220 
3221     _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer);
3222   }
3223 };
3224 
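The HeapRegionClaimer handed to heap_region_par_iterate() in work() ensures each heap region is processed by exactly one worker, which is what makes it safe for every worker to walk the whole region sequence. Presumably this reduces to a compare-and-swap per region; a minimal stand-alone sketch of the idea (std::atomic and a hypothetical class, not HotSpot's implementation):

    #include <atomic>
    #include <cstdint>

    class SimpleRegionClaimer {
      std::atomic<uint32_t>* _claims;  // one claim flag per region
      uint32_t _n_regions;
    public:
      explicit SimpleRegionClaimer(uint32_t n_regions)
          : _claims(new std::atomic<uint32_t>[n_regions]), _n_regions(n_regions) {
        for (uint32_t i = 0; i < _n_regions; i++) {
          _claims[i].store(0, std::memory_order_relaxed);
        }
      }
      ~SimpleRegionClaimer() { delete[] _claims; }

      // Returns true for exactly one caller per region index, so a gang
      // of workers partitions the regions with no further coordination.
      bool claim_region(uint32_t idx) {
        uint32_t expected = 0;
        return _claims[idx].compare_exchange_strong(expected, 1);
      }
    };

Each worker iterates over all region indices and processes only those for which claim_region() returns true.
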
3225 
3226 void ConcurrentMark::aggregate_count_data() {
3227   uint n_workers = _g1h->workers()->active_workers();
3228 
3229   G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
3230                                            _max_worker_id, n_workers);
3231 
3232   _g1h->set_par_threads(n_workers);
3233   _g1h->workers()->run_task(&g1_par_agg_task);
3234   _g1h->set_par_threads(0);
3235 }
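Note the bracketing idiom above: set_par_threads(n_workers) before run_task() and set_par_threads(0) after it. With this change the worker count is carried as uint end-to-end, matching the unsigned worker-id space used by work(uint worker_id), where the old version (first listing) stored the result of active_workers() in an int only for it to be converted back for the unsigned worker ids.
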
3236 
3237 // Clear the per-worker arrays used to store the per-region counting data
3238 void ConcurrentMark::clear_all_count_data() {
3239   // Clear the global card bitmap - it will be filled during
3240   // liveness count aggregation (during remark) and the
3241   // final counting task.
3242   _card_bm.clear();
3243 
3244   // Clear the global region bitmap - it will be filled as part
3245   // of the final counting task.
3246   _region_bm.clear();
3247 
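The hunk ends before the per-worker clearing promised by the comment at line 3237. Presumably the function goes on to reset each worker's private counting structures along the following lines (a hedged sketch; the accessor names are assumptions about ConcurrentMark's counting API, not code shown in this webrev):

    // Assumed continuation: reset each worker's private counting data,
    // indexed by the unsigned worker id.
    for (uint i = 0; i < _max_worker_id; i += 1) {
      BitMap* task_card_bm = count_card_bitmap_for(i);              // assumed accessor
      size_t* marked_bytes_array = count_marked_bytes_array_for(i); // assumed accessor

      task_card_bm->clear();
      memset(marked_bytes_array, 0, _g1h->max_regions() * sizeof(size_t));
    }
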

