// Construct a root processor for the given heap, coordinating n_workers
// GC worker threads.  The sub-task set is sized for the G1RP_PS_* task ids,
// and the monitor (never safepoint-checking) is used as a barrier while
// workers synchronize on strong-CLD/nmethod discovery — see the
// _n_workers_discovered_strong_classes counter it protects.
G1RootProcessor::G1RootProcessor(G1CollectedHeap* g1h, uint n_workers) :
    _g1h(g1h),
    _process_strong_tasks(G1RP_PS_NumElements),
    _srs(n_workers),
    _lock(Mutex::leaf, "G1 Root Scanning barrier lock", false, Monitor::_safepoint_check_never),
    _n_workers_discovered_strong_classes(0) {}
72
73 void G1RootProcessor::evacuate_roots(G1EvacuationRootClosures* closures, uint worker_i) {
74 double ext_roots_start = os::elapsedTime();
75 G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();
76
77 process_java_roots(closures, phase_times, worker_i);
78
79 // This is the point where this worker thread will not find more strong CLDs/nmethods.
80 // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing.
81 if (closures->trace_metadata()) {
82 worker_has_discovered_all_strong_classes();
83 }
84
85 process_vm_roots(closures, phase_times, worker_i);
86
87 {
88 // Now the CM ref_processor roots.
89 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CMRefRoots, worker_i);
90 if (!_process_strong_tasks.is_task_claimed(G1RP_PS_refProcessor_oops_do)) {
91 // We need to treat the discovered reference lists of the
92 // concurrent mark ref processor as roots and keep entries
93 // (which are added by the marking threads) on them live
94 // until they can be processed at the end of marking.
95 _g1h->ref_processor_cm()->weak_oops_do(closures->strong_oops());
96 }
97 }
98
99 if (closures->trace_metadata()) {
100 {
101 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::WaitForStrongCLD, worker_i);
102 // Barrier to make sure all workers passed
103 // the strong CLD and strong nmethods phases.
104 wait_until_all_strong_classes_discovered();
105 }
174 public:
175 AllRootsClosures(OopClosure* roots, CLDClosure* clds) :
176 _roots(roots), _clds(clds) {}
177
178 OopClosure* weak_oops() { return _roots; }
179 OopClosure* strong_oops() { return _roots; }
180
181 // By returning the same CLDClosure for both weak and strong CLDs we ensure
182 // that a single walk of the CLDG will invoke the closure on all CLDs in the
183 // system.
184 CLDClosure* weak_clds() { return _clds; }
185 CLDClosure* strong_clds() { return _clds; }
186
187 // We don't want to visit code blobs more than once, so we return NULL for the
188 // strong case and walk the entire code cache as a separate step.
189 CodeBlobClosure* strong_codeblobs() { return NULL; }
190 };
191
192 void G1RootProcessor::process_all_roots(OopClosure* oops,
193 CLDClosure* clds,
194 CodeBlobClosure* blobs) {
195 AllRootsClosures closures(oops, clds);
196
197 process_java_roots(&closures, NULL, 0);
198 process_vm_roots(&closures, NULL, 0);
199
200 if (!_process_strong_tasks.is_task_claimed(G1RP_PS_CodeCache_oops_do)) {
201 CodeCache::blobs_do(blobs);
202 }
203
204 _process_strong_tasks.all_tasks_completed(n_workers());
205 }
206
207 void G1RootProcessor::process_java_roots(G1RootClosures* closures,
208 G1GCPhaseTimes* phase_times,
209 uint worker_i) {
210 // Iterating over the CLDG and the Threads are done early to allow us to
211 // first process the strong CLDs and nmethods and then, after a barrier,
212 // let the thread process the weak CLDs and nmethods.
213 {
214 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CLDGRoots, worker_i);
215 if (!_process_strong_tasks.is_task_claimed(G1RP_PS_ClassLoaderDataGraph_oops_do)) {
216 ClassLoaderDataGraph::roots_cld_do(closures->strong_clds(), closures->weak_clds());
217 }
218 }
219
220 {
221 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ThreadRoots, worker_i);
222 bool is_par = n_workers() > 1;
223 Threads::possibly_parallel_oops_do(is_par,
224 closures->strong_oops(),
225 closures->strong_codeblobs());
226 }
263 {
264 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ManagementRoots, worker_i);
265 if (!_process_strong_tasks.is_task_claimed(G1RP_PS_Management_oops_do)) {
266 Management::oops_do(strong_roots);
267 }
268 }
269
270 {
271 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::JVMTIRoots, worker_i);
272 if (!_process_strong_tasks.is_task_claimed(G1RP_PS_jvmti_oops_do)) {
273 JvmtiExport::oops_do(strong_roots);
274 }
275 }
276
277 {
278 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::SystemDictionaryRoots, worker_i);
279 if (!_process_strong_tasks.is_task_claimed(G1RP_PS_SystemDictionary_oops_do)) {
280 SystemDictionary::roots_oops_do(strong_roots, weak_roots);
281 }
282 }
283
284 {
285 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::StringTableRoots, worker_i);
286 // All threads execute the following. A specific chunk of buckets
287 // from the StringTable are the individual tasks.
288 if (weak_roots != NULL) {
289 StringTable::possibly_parallel_oops_do(weak_roots);
290 }
291 }
292 }
293
294 uint G1RootProcessor::n_workers() const {
295 return _srs.n_threads();
296 }
|
// Construct a root processor for the given heap, coordinating n_workers
// GC worker threads.  The sub-task set is sized for the G1RP_PS_* task ids,
// and the monitor (never safepoint-checking) is used as a barrier while
// workers synchronize on strong-CLD/nmethod discovery — see the
// _n_workers_discovered_strong_classes counter it protects.
G1RootProcessor::G1RootProcessor(G1CollectedHeap* g1h, uint n_workers) :
    _g1h(g1h),
    _process_strong_tasks(G1RP_PS_NumElements),
    _srs(n_workers),
    _lock(Mutex::leaf, "G1 Root Scanning barrier lock", false, Monitor::_safepoint_check_never),
    _n_workers_discovered_strong_classes(0) {}
72
73 void G1RootProcessor::evacuate_roots(G1EvacuationRootClosures* closures, uint worker_i) {
74 double ext_roots_start = os::elapsedTime();
75 G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();
76
77 process_java_roots(closures, phase_times, worker_i);
78
79 // This is the point where this worker thread will not find more strong CLDs/nmethods.
80 // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing.
81 if (closures->trace_metadata()) {
82 worker_has_discovered_all_strong_classes();
83 }
84
85 process_vm_roots(closures, phase_times, worker_i);
86 process_string_table_roots(closures, phase_times, worker_i);
87
88 {
89 // Now the CM ref_processor roots.
90 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CMRefRoots, worker_i);
91 if (!_process_strong_tasks.is_task_claimed(G1RP_PS_refProcessor_oops_do)) {
92 // We need to treat the discovered reference lists of the
93 // concurrent mark ref processor as roots and keep entries
94 // (which are added by the marking threads) on them live
95 // until they can be processed at the end of marking.
96 _g1h->ref_processor_cm()->weak_oops_do(closures->strong_oops());
97 }
98 }
99
100 if (closures->trace_metadata()) {
101 {
102 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::WaitForStrongCLD, worker_i);
103 // Barrier to make sure all workers passed
104 // the strong CLD and strong nmethods phases.
105 wait_until_all_strong_classes_discovered();
106 }
175 public:
176 AllRootsClosures(OopClosure* roots, CLDClosure* clds) :
177 _roots(roots), _clds(clds) {}
178
179 OopClosure* weak_oops() { return _roots; }
180 OopClosure* strong_oops() { return _roots; }
181
182 // By returning the same CLDClosure for both weak and strong CLDs we ensure
183 // that a single walk of the CLDG will invoke the closure on all CLDs in the
184 // system.
185 CLDClosure* weak_clds() { return _clds; }
186 CLDClosure* strong_clds() { return _clds; }
187
188 // We don't want to visit code blobs more than once, so we return NULL for the
189 // strong case and walk the entire code cache as a separate step.
190 CodeBlobClosure* strong_codeblobs() { return NULL; }
191 };
192
193 void G1RootProcessor::process_all_roots(OopClosure* oops,
194 CLDClosure* clds,
195 CodeBlobClosure* blobs,
196 bool process_string_table) {
197 AllRootsClosures closures(oops, clds);
198
199 process_java_roots(&closures, NULL, 0);
200 process_vm_roots(&closures, NULL, 0);
201
202 if (process_string_table) {
203 process_string_table_roots(&closures, NULL, 0);
204 }
205 process_code_cache_roots(blobs, NULL, 0);
206
207 _process_strong_tasks.all_tasks_completed(n_workers());
208 }
209
// Convenience overload: process all roots, including the string table.
void G1RootProcessor::process_all_roots(OopClosure* oops,
                                        CLDClosure* clds,
                                        CodeBlobClosure* blobs) {
  process_all_roots(oops, clds, blobs, true);
}
215
// As process_all_roots, but skips the StringTable.  Per the assert, this
// may only be used when class unloading is disabled.
void G1RootProcessor::process_all_roots_no_string_table(OopClosure* oops,
                                                        CLDClosure* clds,
                                                        CodeBlobClosure* blobs) {
  assert(!ClassUnloading, "Should only be used when class unloading is disabled");
  process_all_roots(oops, clds, blobs, false);
}
222
223 void G1RootProcessor::process_java_roots(G1RootClosures* closures,
224 G1GCPhaseTimes* phase_times,
225 uint worker_i) {
226 // Iterating over the CLDG and the Threads are done early to allow us to
227 // first process the strong CLDs and nmethods and then, after a barrier,
228 // let the thread process the weak CLDs and nmethods.
229 {
230 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CLDGRoots, worker_i);
231 if (!_process_strong_tasks.is_task_claimed(G1RP_PS_ClassLoaderDataGraph_oops_do)) {
232 ClassLoaderDataGraph::roots_cld_do(closures->strong_clds(), closures->weak_clds());
233 }
234 }
235
236 {
237 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ThreadRoots, worker_i);
238 bool is_par = n_workers() > 1;
239 Threads::possibly_parallel_oops_do(is_par,
240 closures->strong_oops(),
241 closures->strong_codeblobs());
242 }
279 {
280 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ManagementRoots, worker_i);
281 if (!_process_strong_tasks.is_task_claimed(G1RP_PS_Management_oops_do)) {
282 Management::oops_do(strong_roots);
283 }
284 }
285
286 {
287 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::JVMTIRoots, worker_i);
288 if (!_process_strong_tasks.is_task_claimed(G1RP_PS_jvmti_oops_do)) {
289 JvmtiExport::oops_do(strong_roots);
290 }
291 }
292
293 {
294 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::SystemDictionaryRoots, worker_i);
295 if (!_process_strong_tasks.is_task_claimed(G1RP_PS_SystemDictionary_oops_do)) {
296 SystemDictionary::roots_oops_do(strong_roots, weak_roots);
297 }
298 }
299 }
300
// Walk the interned-string table, applying the weak oop closure to its
// entries, and record the time under the StringTableRoots phase.
// Requires a non-NULL weak closure, i.e. a caller that processes all roots.
void G1RootProcessor::process_string_table_roots(G1RootClosures* closures,
                                                 G1GCPhaseTimes* phase_times,
                                                 uint worker_i) {
  assert(closures->weak_oops() != NULL, "Should only be called when all roots are processed");
  G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::StringTableRoots, worker_i);
  // All threads execute the following. A specific chunk of buckets
  // from the StringTable are the individual tasks.
  StringTable::possibly_parallel_oops_do(closures->weak_oops());
}
310
// Walk every blob in the code cache with code_closure.  Claimed as a
// single sub-task, so only one worker performs the traversal.
// phase_times and worker_i are currently unused — presumably kept so the
// signature matches the other process_*_roots helpers; confirm before removing.
void G1RootProcessor::process_code_cache_roots(CodeBlobClosure* code_closure,
                                               G1GCPhaseTimes* phase_times,
                                               uint worker_i) {
  if (!_process_strong_tasks.is_task_claimed(G1RP_PS_CodeCache_oops_do)) {
    CodeCache::blobs_do(code_closure);
  }
}
318
319 uint G1RootProcessor::n_workers() const {
320 return _srs.n_threads();
321 }
|