// Constructs a root processor for a collection with up to n_workers parallel
// GC worker threads. The strong-task set holds one claim slot per root task
// (G1RP_PS_NumElements), so each root source is processed by exactly one worker.
G1RootProcessor::G1RootProcessor(G1CollectedHeap* g1h, uint n_workers) :
    _g1h(g1h),
    _process_strong_tasks(G1RP_PS_NumElements),
    _srs(n_workers),
    // Leaf-ranked monitor used only for the strong-CLD/nmethod worker barrier;
    // it is never acquired with a safepoint check.
    _lock(Mutex::leaf, "G1 Root Scanning barrier lock", false, Monitor::_safepoint_check_never),
    // Counts workers that have finished discovering strong classes (starts at 0).
    _n_workers_discovered_strong_classes(0) {}
72
73 void G1RootProcessor::evacuate_roots(G1EvacuationRootClosures* closures, uint worker_i) {
74 double ext_roots_start = os::elapsedTime();
75 G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();
76
77 process_java_roots(closures, phase_times, worker_i);
78
79 // This is the point where this worker thread will not find more strong CLDs/nmethods.
80 // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing.
81 if (closures->trace_metadata()) {
82 worker_has_discovered_all_strong_classes();
83 }
84
85 process_vm_roots(closures, phase_times, worker_i);
86
87 {
88 // Now the CM ref_processor roots.
89 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CMRefRoots, worker_i);
90 if (!_process_strong_tasks.is_task_claimed(G1RP_PS_refProcessor_oops_do)) {
91 // We need to treat the discovered reference lists of the
92 // concurrent mark ref processor as roots and keep entries
93 // (which are added by the marking threads) on them live
94 // until they can be processed at the end of marking.
95 _g1h->ref_processor_cm()->weak_oops_do(closures->strong_oops());
96 }
97 }
98
99 if (closures->trace_metadata()) {
100 {
101 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::WaitForStrongCLD, worker_i);
102 // Barrier to make sure all workers passed
103 // the strong CLD and strong nmethods phases.
104 wait_until_all_strong_classes_discovered();
105 }
180
// By returning the same CLDClosure for both weak and strong CLDs we ensure
// that a single walk of the CLDG will invoke the closure on all CLDs in the
// system.
CLDClosure* weak_clds() { return _clds; }
CLDClosure* strong_clds() { return _clds; }
186
// We don't want to visit code blobs more than once, so we return NULL for the
// strong case and walk the entire code cache as a separate step
// (see process_all_roots, which claims G1RP_PS_CodeCache_oops_do for that walk).
CodeBlobClosure* strong_codeblobs() { return NULL; }
190 };
191
// Processes all roots — Java roots, VM roots, and the entire code cache —
// with the given closures. No per-phase timing is recorded (phase times are
// passed as NULL, worker id 0).
void G1RootProcessor::process_all_roots(OopClosure* oops,
                                        CLDClosure* clds,
                                        CodeBlobClosure* blobs) {
  AllRootsClosures closures(oops, clds);

  process_java_roots(&closures, NULL, 0);
  process_vm_roots(&closures, NULL, 0);

  // Walk the whole code cache as a single, once-claimed task. AllRootsClosures
  // returns NULL from strong_codeblobs(), so blobs are not visited twice.
  if (!_process_strong_tasks.is_task_claimed(G1RP_PS_CodeCache_oops_do)) {
    CodeCache::blobs_do(blobs);
  }

  // Signal that this thread has finished its share of the claimed tasks so
  // the task set can be reused for the next collection.
  _process_strong_tasks.all_tasks_completed(n_workers());
}
206
207 void G1RootProcessor::process_java_roots(G1RootClosures* closures,
208 G1GCPhaseTimes* phase_times,
209 uint worker_i) {
210 // Iterating over the CLDG and the Threads are done early to allow us to
211 // first process the strong CLDs and nmethods and then, after a barrier,
212 // let the thread process the weak CLDs and nmethods.
213 {
214 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CLDGRoots, worker_i);
215 if (!_process_strong_tasks.is_task_claimed(G1RP_PS_ClassLoaderDataGraph_oops_do)) {
216 ClassLoaderDataGraph::roots_cld_do(closures->strong_clds(), closures->weak_clds());
217 }
218 }
219
220 {
221 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ThreadRoots, worker_i);
222 bool is_par = n_workers() > 1;
263 {
264 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ManagementRoots, worker_i);
265 if (!_process_strong_tasks.is_task_claimed(G1RP_PS_Management_oops_do)) {
266 Management::oops_do(strong_roots);
267 }
268 }
269
270 {
271 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::JVMTIRoots, worker_i);
272 if (!_process_strong_tasks.is_task_claimed(G1RP_PS_jvmti_oops_do)) {
273 JvmtiExport::oops_do(strong_roots);
274 }
275 }
276
277 {
278 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::SystemDictionaryRoots, worker_i);
279 if (!_process_strong_tasks.is_task_claimed(G1RP_PS_SystemDictionary_oops_do)) {
280 SystemDictionary::roots_oops_do(strong_roots, weak_roots);
281 }
282 }
283
284 {
285 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::StringTableRoots, worker_i);
286 // All threads execute the following. A specific chunk of buckets
287 // from the StringTable are the individual tasks.
288 if (weak_roots != NULL) {
289 StringTable::possibly_parallel_oops_do(weak_roots);
290 }
291 }
292 }
293
294 uint G1RootProcessor::n_workers() const {
295 return _srs.n_threads();
296 }
|
// Constructs a root processor for a collection with up to n_workers parallel
// GC worker threads. The strong-task set holds one claim slot per root task
// (G1RP_PS_NumElements), so each root source is processed by exactly one worker.
G1RootProcessor::G1RootProcessor(G1CollectedHeap* g1h, uint n_workers) :
    _g1h(g1h),
    _process_strong_tasks(G1RP_PS_NumElements),
    _srs(n_workers),
    // Leaf-ranked monitor used only for the strong-CLD/nmethod worker barrier;
    // it is never acquired with a safepoint check.
    _lock(Mutex::leaf, "G1 Root Scanning barrier lock", false, Monitor::_safepoint_check_never),
    // Counts workers that have finished discovering strong classes (starts at 0).
    _n_workers_discovered_strong_classes(0) {}
72
73 void G1RootProcessor::evacuate_roots(G1EvacuationRootClosures* closures, uint worker_i) {
74 double ext_roots_start = os::elapsedTime();
75 G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();
76
77 process_java_roots(closures, phase_times, worker_i);
78
79 // This is the point where this worker thread will not find more strong CLDs/nmethods.
80 // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing.
81 if (closures->trace_metadata()) {
82 worker_has_discovered_all_strong_classes();
83 }
84
85 process_vm_roots(closures, phase_times, worker_i);
86 process_string_table_roots(closures, phase_times, worker_i);
87
88 {
89 // Now the CM ref_processor roots.
90 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CMRefRoots, worker_i);
91 if (!_process_strong_tasks.is_task_claimed(G1RP_PS_refProcessor_oops_do)) {
92 // We need to treat the discovered reference lists of the
93 // concurrent mark ref processor as roots and keep entries
94 // (which are added by the marking threads) on them live
95 // until they can be processed at the end of marking.
96 _g1h->ref_processor_cm()->weak_oops_do(closures->strong_oops());
97 }
98 }
99
100 if (closures->trace_metadata()) {
101 {
102 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::WaitForStrongCLD, worker_i);
103 // Barrier to make sure all workers passed
104 // the strong CLD and strong nmethods phases.
105 wait_until_all_strong_classes_discovered();
106 }
181
// By returning the same CLDClosure for both weak and strong CLDs we ensure
// that a single walk of the CLDG will invoke the closure on all CLDs in the
// system.
CLDClosure* weak_clds() { return _clds; }
CLDClosure* strong_clds() { return _clds; }
187
// We don't want to visit code blobs more than once, so we return NULL for the
// strong case and walk the entire code cache as a separate step
// (see process_code_cache_roots, claimed via G1RP_PS_CodeCache_oops_do).
CodeBlobClosure* strong_codeblobs() { return NULL; }
191 };
192
// Processes all roots — Java roots, VM roots, the StringTable, and the entire
// code cache — with the given closures. No per-phase timing is recorded
// (phase times are passed as NULL, worker id 0).
void G1RootProcessor::process_all_roots(OopClosure* oops,
                                        CLDClosure* clds,
                                        CodeBlobClosure* blobs) {
  AllRootsClosures closures(oops, clds);

  process_java_roots(&closures, NULL, 0);
  process_vm_roots(&closures, NULL, 0);

  // StringTable entries are treated as strong roots here; the code cache is
  // walked in full since strong_codeblobs() returned NULL above.
  process_string_table_roots(&closures, NULL, 0);
  process_code_cache_roots(blobs, NULL, 0);

  // Signal that this thread has finished its share of the claimed tasks so
  // the task set can be reused for the next collection.
  _process_strong_tasks.all_tasks_completed(n_workers());
}
206
// Same as process_all_roots() but deliberately skips the StringTable.
// Only valid when class unloading is disabled (enforced by the assert);
// presumably the string table is then processed elsewhere — confirm against
// the callers.
void G1RootProcessor::process_all_roots_no_string_table(OopClosure* oops,
                                                        CLDClosure* clds,
                                                        CodeBlobClosure* blobs) {
  assert(!ClassUnloading, "Should only be used when class unloading is disabled");
  AllRootsClosures closures(oops, clds);

  process_java_roots(&closures, NULL, 0);
  process_vm_roots(&closures, NULL, 0);

  process_code_cache_roots(blobs, NULL, 0);

  // Signal that this thread has finished its share of the claimed tasks so
  // the task set can be reused for the next collection.
  _process_strong_tasks.all_tasks_completed(n_workers());
}
220
221 void G1RootProcessor::process_java_roots(G1RootClosures* closures,
222 G1GCPhaseTimes* phase_times,
223 uint worker_i) {
224 // Iterating over the CLDG and the Threads are done early to allow us to
225 // first process the strong CLDs and nmethods and then, after a barrier,
226 // let the thread process the weak CLDs and nmethods.
227 {
228 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CLDGRoots, worker_i);
229 if (!_process_strong_tasks.is_task_claimed(G1RP_PS_ClassLoaderDataGraph_oops_do)) {
230 ClassLoaderDataGraph::roots_cld_do(closures->strong_clds(), closures->weak_clds());
231 }
232 }
233
234 {
235 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ThreadRoots, worker_i);
236 bool is_par = n_workers() > 1;
277 {
278 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ManagementRoots, worker_i);
279 if (!_process_strong_tasks.is_task_claimed(G1RP_PS_Management_oops_do)) {
280 Management::oops_do(strong_roots);
281 }
282 }
283
284 {
285 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::JVMTIRoots, worker_i);
286 if (!_process_strong_tasks.is_task_claimed(G1RP_PS_jvmti_oops_do)) {
287 JvmtiExport::oops_do(strong_roots);
288 }
289 }
290
291 {
292 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::SystemDictionaryRoots, worker_i);
293 if (!_process_strong_tasks.is_task_claimed(G1RP_PS_SystemDictionary_oops_do)) {
294 SystemDictionary::roots_oops_do(strong_roots, weak_roots);
295 }
296 }
297 }
298
// Applies the weak oop closure to the StringTable. Unlike the other root
// tasks there is no single-worker claim here: every worker participates,
// and the StringTable internally hands out chunks of buckets as tasks.
void G1RootProcessor::process_string_table_roots(G1RootClosures* closures,
                                                 G1GCPhaseTimes* phase_times,
                                                 uint worker_i) {
  assert(closures->weak_oops() != NULL, "Should only be called when all roots are processed");
  G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::StringTableRoots, worker_i);
  // All threads execute the following. A specific chunk of buckets
  // from the StringTable are the individual tasks.
  StringTable::possibly_parallel_oops_do(closures->weak_oops());
}
308
// Walks the entire code cache with the given closure, as a once-claimed task
// so only one worker performs the walk.
// NOTE: phase_times and worker_i are currently unused; they are kept to match
// the signatures of the other process_*_roots helpers.
void G1RootProcessor::process_code_cache_roots(CodeBlobClosure* code_closure,
                                               G1GCPhaseTimes* phase_times,
                                               uint worker_i) {
  if (!_process_strong_tasks.is_task_claimed(G1RP_PS_CodeCache_oops_do)) {
    CodeCache::blobs_do(code_closure);
  }
}
316
317 uint G1RootProcessor::n_workers() const {
318 return _srs.n_threads();
319 }
|