  };

  HeapRegionGatheringOopClosure _oc;
public:
  G1CodeBlobClosure(OopClosure* oc) : _oc(oc) {}

  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = cb->as_nmethod_or_null();
    if (nm != NULL) {
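      // Claim the nmethod so that its oops are processed at most once per
      // collection: test_set_oops_do_mark() returns true if another worker
      // has already claimed it.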
      if (!nm->test_set_oops_do_mark()) {
        _oc.set_nm(nm);
        nm->oops_do(&_oc);
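        // Re-patch any oops embedded in the nmethod's code so that they
        // agree with the objects the closure may just have moved.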
        nm->fix_oop_relocations();
      }
    }
  }
};

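// The next two methods implement a barrier between the strong and the weak
// class-discovery passes: every worker calls
// worker_has_discovered_all_strong_classes() when its strong pass is done,
// and wait_until_all_strong_classes_discovered() blocks until the last
// worker has arrived.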
void G1RootProcessor::worker_has_discovered_all_strong_classes() {
  assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");

  uint new_value = (uint)Atomic::add(1, &_n_workers_discovered_strong_classes);
  if (new_value == n_workers()) {
    // This thread is last. Notify the others.
    MonitorLockerEx ml(&_lock, Mutex::_no_safepoint_check_flag);
    _lock.notify_all();
  }
}

void G1RootProcessor::wait_until_all_strong_classes_discovered() {
  assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");

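  // Cheap check outside the lock: the counter only ever increases towards
  // n_workers(), so once they match no waiting is needed.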
  if ((uint)_n_workers_discovered_strong_classes != n_workers()) {
    MonitorLockerEx ml(&_lock, Mutex::_no_safepoint_check_flag);
    while ((uint)_n_workers_discovered_strong_classes != n_workers()) {
      _lock.wait(Mutex::_no_safepoint_check_flag, 0, false);
    }
  }
}

G1RootProcessor::G1RootProcessor(G1CollectedHeap* g1h, uint n_workers) :
    _g1h(g1h),
    _process_strong_tasks(new SubTasksDone(G1RP_PS_NumElements)),
    _srs(n_workers),
    _lock(Mutex::leaf, "G1 Root Scanning barrier lock", false, Monitor::_safepoint_check_never),
    _n_workers_discovered_strong_classes(0) {}

void G1RootProcessor::evacuate_roots(OopClosure* scan_non_heap_roots,
                                     OopClosure* scan_non_heap_weak_roots,
                                     CLDClosure* scan_strong_clds,
                                     CLDClosure* scan_weak_clds,
                                     bool trace_metadata,
                                     uint worker_i) {
  // First scan the shared roots.
  double ext_roots_start = os::elapsedTime();
  G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();

  BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
  BufferingOopClosure buf_scan_non_heap_weak_roots(scan_non_heap_weak_roots);

  OopClosure* const weak_roots = &buf_scan_non_heap_weak_roots;
  OopClosure* const strong_roots = &buf_scan_non_heap_roots;

  // CodeBlobClosures are not interoperable with BufferingOopClosures
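  // ... (remainder of evacuate_roots() elided) ...
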
void G1RootProcessor::process_java_roots(OopClosure* strong_roots,
                                         CLDClosure* thread_stack_clds,
                                         CLDClosure* strong_clds,
                                         CLDClosure* weak_clds,
                                         CodeBlobClosure* strong_code,
                                         G1GCPhaseTimes* phase_times,
                                         uint worker_i) {
  assert(thread_stack_clds == NULL || weak_clds == NULL, "These closures overlap; only one may be set");
  // Iterating over the CLDG and the Threads is done early so that we can
  // first process the strong CLDs and nmethods and then, after a barrier,
  // let the threads process the weak CLDs and nmethods.
  {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CLDGRoots, worker_i);
    if (!_process_strong_tasks->is_task_claimed(G1RP_PS_ClassLoaderDataGraph_oops_do)) {
      ClassLoaderDataGraph::roots_cld_do(strong_clds, weak_clds);
    }
  }

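  // Thread stacks are always walked; when running with more than one worker,
  // Threads::possibly_parallel_oops_do() lets each worker claim individual
  // threads rather than splitting the work via SubTasksDone.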
  {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ThreadRoots, worker_i);
    bool is_par = n_workers() > 1;
    Threads::possibly_parallel_oops_do(is_par, strong_roots, thread_stack_clds, strong_code);
  }
}

void G1RootProcessor::process_vm_roots(OopClosure* strong_roots,
                                       OopClosure* weak_roots,
                                       G1GCPhaseTimes* phase_times,
                                       uint worker_i) {
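  // Each VM root group below is processed by whichever worker claims the
  // corresponding SubTasksDone task first; all other workers skip it.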
  {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::UniverseRoots, worker_i);
    if (!_process_strong_tasks->is_task_claimed(G1RP_PS_Universe_oops_do)) {
      Universe::oops_do(strong_roots);
    }
  }

  {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::JNIRoots, worker_i);
    if (!_process_strong_tasks->is_task_claimed(G1RP_PS_JNIHandles_oops_do)) {
      JNIHandles::oops_do(strong_roots);
    }
  }

  // ... (other VM root groups elided) ...

  {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::StringTableRoots, worker_i);
    // All threads execute the following; the individual tasks are distinct
    // chunks of buckets from the StringTable.
    if (weak_roots != NULL) {
      StringTable::possibly_parallel_oops_do(weak_roots);
    }
  }
}

void G1RootProcessor::scan_remembered_sets(G1ParPushHeapRSClosure* scan_rs,
                                           OopClosure* scan_non_heap_weak_roots,
                                           uint worker_i) {
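  // The code roots of collection-set regions are scanned through the
  // weak-root closure; G1CodeBlobClosure claims each nmethod so that it is
  // scanned at most once.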
  G1CodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots);

  _g1h->g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
}

void G1RootProcessor::set_num_workers(uint active_workers) {
  assert(active_workers == _srs.n_threads(),
         err_msg("Mismatch between the number of worker threads: active_workers: %u, n_workers(): %u",
                 active_workers,
                 _srs.n_threads()));
  _process_strong_tasks->set_n_threads(active_workers);
}

uint G1RootProcessor::n_workers() const {
  return _srs.n_threads();
}