2297 // the beginning of CMTask::do_marking_step() for those conditions -
2298 // one of which is reaching the specified time target.) It is only
2299 // when CMTask::do_marking_step() returns without setting the
2300 // has_aborted() flag that the marking step has completed.
2301
2302 _task->do_marking_step(1000000000.0 /* something very large */,
2303 true /* do_termination */,
2304 _is_serial);
2305 } while (_task->has_aborted() && !_cm->has_overflown());
2306 }
2307 };
2308
2309 // Implementation of AbstractRefProcTaskExecutor for parallel
2310 // reference processing at the end of G1 concurrent marking
2311
2312 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
2313 private:
2314 G1CollectedHeap* _g1h;
2315 ConcurrentMark* _cm;
2316 WorkGang* _workers;
2317 int _active_workers;
2318
2319 public:
2320 G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
2321 ConcurrentMark* cm,
2322 WorkGang* workers,
2323 int n_workers) :
2324 _g1h(g1h), _cm(cm),
2325 _workers(workers), _active_workers(n_workers) { }
2326
2327 // Executes the given task using concurrent marking worker threads.
2328 virtual void execute(ProcessTask& task);
2329 virtual void execute(EnqueueTask& task);
2330 };
2331
2332 class G1CMRefProcTaskProxy: public AbstractGangTask {
2333 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2334 ProcessTask& _proc_task;
2335 G1CollectedHeap* _g1h;
2336 ConcurrentMark* _cm;
2337
2338 public:
2339 G1CMRefProcTaskProxy(ProcessTask& proc_task,
2340 G1CollectedHeap* g1h,
2341 ConcurrentMark* cm) :
2342 AbstractGangTask("Process reference objects in parallel"),
2343 _proc_task(proc_task), _g1h(g1h), _cm(cm) {
2613 task->record_start_time();
2614 {
2615 ResourceMark rm;
2616 HandleMark hm;
2617
2618 G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
2619 Threads::threads_do(&threads_f);
2620 }
2621
2622 do {
2623 task->do_marking_step(1000000000.0 /* something very large */,
2624 true /* do_termination */,
2625 false /* is_serial */);
2626 } while (task->has_aborted() && !_cm->has_overflown());
2627 // If we overflow, then we do not want to restart. We instead
2628 // want to abort remark and do concurrent marking again.
2629 task->record_end_time();
2630 }
2631 }
2632
2633 CMRemarkTask(ConcurrentMark* cm, int active_workers) :
2634 AbstractGangTask("Par Remark"), _cm(cm) {
2635 _cm->terminator()->reset_for_reuse(active_workers);
2636 }
2637 };
2638
2639 void ConcurrentMark::checkpointRootsFinalWork() {
2640 ResourceMark rm;
2641 HandleMark hm;
2642 G1CollectedHeap* g1h = G1CollectedHeap::heap();
2643
2644 G1CMTraceTime trace("Finalize Marking", G1Log::finer());
2645
2646 g1h->ensure_parsability(false);
2647
2648 StrongRootsScope srs;
2649 // this is remark, so we'll use up all active threads
2650 uint active_workers = g1h->workers()->active_workers();
2651 if (active_workers == 0) {
2652 assert(active_workers > 0, "Should have been set earlier");
2653 active_workers = (uint) ParallelGCThreads;
3016 // passed in for left offset here.
3017 BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
3018 scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
3019 }
3020 }
3021
3022 // Update the marked bytes for this region.
3023 hr->add_to_marked_bytes(marked_bytes);
3024
3025 // Next heap region
3026 return false;
3027 }
3028 };
3029
3030 class G1AggregateCountDataTask: public AbstractGangTask {
3031 protected:
3032 G1CollectedHeap* _g1h;
3033 ConcurrentMark* _cm;
3034 BitMap* _cm_card_bm;
3035 uint _max_worker_id;
3036 int _active_workers;
3037 HeapRegionClaimer _hrclaimer;
3038
3039 public:
3040 G1AggregateCountDataTask(G1CollectedHeap* g1h,
3041 ConcurrentMark* cm,
3042 BitMap* cm_card_bm,
3043 uint max_worker_id,
3044 int n_workers) :
3045 AbstractGangTask("Count Aggregation"),
3046 _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
3047 _max_worker_id(max_worker_id),
3048 _active_workers(n_workers),
3049 _hrclaimer(_active_workers) {
3050 }
3051
3052 void work(uint worker_id) {
3053 AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);
3054
3055 _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer);
3056 }
3057 };
3058
3059
3060 void ConcurrentMark::aggregate_count_data() {
3061 int n_workers = _g1h->workers()->active_workers();
3062
3063 G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
3064 _max_worker_id, n_workers);
3065
3066 _g1h->set_par_threads(n_workers);
3067 _g1h->workers()->run_task(&g1_par_agg_task);
3068 _g1h->set_par_threads(0);
3069 }
3070
3071 // Clear the per-worker arrays used to store the per-region counting data
3072 void ConcurrentMark::clear_all_count_data() {
3073 // Clear the global card bitmap - it will be filled during
3074 // liveness count aggregation (during remark) and the
3075 // final counting task.
3076 _card_bm.clear();
3077
3078 // Clear the global region bitmap - it will be filled as part
3079 // of the final counting task.
3080 _region_bm.clear();
3081
|
2297 // the beginning of CMTask::do_marking_step() for those conditions -
2298 // one of which is reaching the specified time target.) It is only
2299 // when CMTask::do_marking_step() returns without setting the
2300 // has_aborted() flag that the marking step has completed.
2301
2302 _task->do_marking_step(1000000000.0 /* something very large */,
2303 true /* do_termination */,
2304 _is_serial);
2305 } while (_task->has_aborted() && !_cm->has_overflown());
2306 }
2307 };
2308
2309 // Implementation of AbstractRefProcTaskExecutor for parallel
2310 // reference processing at the end of G1 concurrent marking
2311
class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
private:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  WorkGang* _workers;       // gang of marking worker threads that will run the tasks
  uint _active_workers;     // number of workers to use for each submitted task

public:
  G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
                          ConcurrentMark* cm,
                          WorkGang* workers,
                          uint n_workers) :
    _g1h(g1h), _cm(cm),
    _workers(workers), _active_workers(n_workers) { }

  // Executes the given task using concurrent marking worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};
2331
2332 class G1CMRefProcTaskProxy: public AbstractGangTask {
2333 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2334 ProcessTask& _proc_task;
2335 G1CollectedHeap* _g1h;
2336 ConcurrentMark* _cm;
2337
2338 public:
2339 G1CMRefProcTaskProxy(ProcessTask& proc_task,
2340 G1CollectedHeap* g1h,
2341 ConcurrentMark* cm) :
2342 AbstractGangTask("Process reference objects in parallel"),
2343 _proc_task(proc_task), _g1h(g1h), _cm(cm) {
2613 task->record_start_time();
2614 {
2615 ResourceMark rm;
2616 HandleMark hm;
2617
2618 G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
2619 Threads::threads_do(&threads_f);
2620 }
2621
2622 do {
2623 task->do_marking_step(1000000000.0 /* something very large */,
2624 true /* do_termination */,
2625 false /* is_serial */);
2626 } while (task->has_aborted() && !_cm->has_overflown());
2627 // If we overflow, then we do not want to restart. We instead
2628 // want to abort remark and do concurrent marking again.
2629 task->record_end_time();
2630 }
2631 }
2632
  // Resets the shared terminator for 'active_workers' threads so the
  // task (and the terminator) can be reused for this remark attempt.
  CMRemarkTask(ConcurrentMark* cm, uint active_workers) :
    AbstractGangTask("Par Remark"), _cm(cm) {
    _cm->terminator()->reset_for_reuse(active_workers);
  }
2637 };
2638
2639 void ConcurrentMark::checkpointRootsFinalWork() {
2640 ResourceMark rm;
2641 HandleMark hm;
2642 G1CollectedHeap* g1h = G1CollectedHeap::heap();
2643
2644 G1CMTraceTime trace("Finalize Marking", G1Log::finer());
2645
2646 g1h->ensure_parsability(false);
2647
2648 StrongRootsScope srs;
2649 // this is remark, so we'll use up all active threads
2650 uint active_workers = g1h->workers()->active_workers();
2651 if (active_workers == 0) {
2652 assert(active_workers > 0, "Should have been set earlier");
2653 active_workers = (uint) ParallelGCThreads;
3016 // passed in for left offset here.
3017 BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
3018 scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
3019 }
3020 }
3021
3022 // Update the marked bytes for this region.
3023 hr->add_to_marked_bytes(marked_bytes);
3024
3025 // Next heap region
3026 return false;
3027 }
3028 };
3029
// Gang task that aggregates the per-worker liveness counting data
// into the global card bitmap, iterating heap regions in parallel.
class G1AggregateCountDataTask: public AbstractGangTask {
protected:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  BitMap* _cm_card_bm;          // global card bitmap that receives the aggregated bits
  uint _max_worker_id;          // exclusive upper bound on worker ids with count data
  uint _active_workers;
  HeapRegionClaimer _hrclaimer; // ensures each region is processed by exactly one worker

public:
  G1AggregateCountDataTask(G1CollectedHeap* g1h,
                           ConcurrentMark* cm,
                           BitMap* cm_card_bm,
                           uint max_worker_id,
                           uint n_workers) :
    AbstractGangTask("Count Aggregation"),
    _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
    _max_worker_id(max_worker_id),
    _active_workers(n_workers),
    _hrclaimer(_active_workers) {
  }

  void work(uint worker_id) {
    AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);

    _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer);
  }
};
3058
3059
3060 void ConcurrentMark::aggregate_count_data() {
3061 uint n_workers = _g1h->workers()->active_workers();
3062
3063 G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
3064 _max_worker_id, n_workers);
3065
3066 _g1h->set_par_threads(n_workers);
3067 _g1h->workers()->run_task(&g1_par_agg_task);
3068 _g1h->set_par_threads(0);
3069 }
3070
3071 // Clear the per-worker arrays used to store the per-region counting data
3072 void ConcurrentMark::clear_all_count_data() {
3073 // Clear the global card bitmap - it will be filled during
3074 // liveness count aggregation (during remark) and the
3075 // final counting task.
3076 _card_bm.clear();
3077
3078 // Clear the global region bitmap - it will be filled as part
3079 // of the final counting task.
3080 _region_bm.clear();
3081
|