57 }
58
59 void G1RootProcessor::wait_until_all_strong_classes_discovered() {
60 assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");
61
62 if ((uint)_n_workers_discovered_strong_classes != n_workers()) {
63 MonitorLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
64 while ((uint)_n_workers_discovered_strong_classes != n_workers()) {
65 ml.wait(0);
66 }
67 }
68 }
69
// Set up root processing for a collection performed by n_workers parallel
// worker threads. _lock is the barrier monitor used by the strong-class
// discovery handshake (see wait_until_all_strong_classes_discovered()), and
// _n_workers_discovered_strong_classes counts workers that have passed the
// strong nmethods phase.
G1RootProcessor::G1RootProcessor(G1CollectedHeap* g1h, uint n_workers) :
    _g1h(g1h),
    _process_strong_tasks(G1RP_PS_NumElements),
    _srs(n_workers),
    _lock(Mutex::leaf, "G1 Root Scan barrier lock", false, Monitor::_safepoint_check_never),
    _n_workers_discovered_strong_classes(0) {}
76
77 void G1RootProcessor::evacuate_roots(G1ParScanThreadState* pss, uint worker_i) {
78 G1GCPhaseTimes* phase_times = _g1h->phase_times();
79
80 G1EvacPhaseTimesTracker timer(phase_times, pss, G1GCPhaseTimes::ExtRootScan, worker_i);
81
82 G1EvacuationRootClosures* closures = pss->closures();
83 process_java_roots(closures, phase_times, worker_i, closures->trace_metadata() /* notify_claimed_roots_done */);
84
85 process_vm_roots(closures, phase_times, worker_i);
86
87 {
88 // Now the CM ref_processor roots.
89 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CMRefRoots, worker_i);
90 if (_process_strong_tasks.try_claim_task(G1RP_PS_refProcessor_oops_do)) {
91 // We need to treat the discovered reference lists of the
92 // concurrent mark ref processor as roots and keep entries
93 // (which are added by the marking threads) on them live
94 // until they can be processed at the end of marking.
95 _g1h->ref_processor_cm()->weak_oops_do(closures->strong_oops());
96 }
97 }
98
99 if (closures->trace_metadata()) {
100 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::WaitForStrongRoots, worker_i);
101 // Wait here to make sure all workers passed the strong nmethods phase.
102 wait_until_all_strong_classes_discovered();
103 }
104
105 _process_strong_tasks.all_tasks_completed(n_workers());
106 }
107
// Adaptor to pass the closures to the strong roots in the VM.
// Weak accessors return NULL: a strong-only walk has no weak processing to do.
// NOTE(review): this view appears to elide part of the class body (e.g. the
// strong_clds()/weak_codeblobs() overrides) — confirm against the full file.
class StrongRootsClosures : public G1RootClosures {
  OopClosure* _roots;
  CLDClosure* _clds;
  CodeBlobClosure* _blobs;
public:
  StrongRootsClosures(OopClosure* roots, CLDClosure* clds, CodeBlobClosure* blobs) :
      _roots(roots), _clds(clds), _blobs(blobs) {}

  OopClosure* weak_oops() { return NULL; }
  OopClosure* strong_oops() { return _roots; }

  CLDClosure* weak_clds() { return NULL; }
  // We don't want to visit code blobs more than once, so we return NULL for the
  // strong case and walk the entire code cache as a separate step.
  CodeBlobClosure* strong_codeblobs() { return NULL; }
};
158
159 void G1RootProcessor::process_all_roots(OopClosure* oops,
160 CLDClosure* clds,
161 CodeBlobClosure* blobs) {
162 AllRootsClosures closures(oops, clds);
163
164 process_java_roots(&closures, NULL, 0);
165 process_vm_roots(&closures, NULL, 0);
166
167 process_code_cache_roots(blobs, NULL, 0);
168
169 _process_strong_tasks.all_tasks_completed(n_workers());
170 }
171
172 void G1RootProcessor::process_java_roots(G1RootClosures* closures,
173 G1GCPhaseTimes* phase_times,
174 uint worker_i,
175 bool notify_claimed_roots_done) {
176 // Iterating over the the threads is done early to allow us to make sure that
177 // the "strong" nmethods are processed first using the strong closure. After a barrier,
178 // let the thread process the weak nmethods.
179 // The problem is that nmethods are claimed to avoid duplicate iteration. This is
180 // a way to make sure that for these nmethods we always apply the strong closure.
181 {
182 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ThreadRoots, worker_i);
183 bool is_par = n_workers() > 1;
184 Threads::possibly_parallel_oops_do(is_par,
185 closures->strong_oops(),
186 closures->strong_codeblobs());
187 }
188
189 // This is the point where this worker thread will not find more strong nmethods.
190 // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing.
191 if (notify_claimed_roots_done) {
192 worker_has_discovered_all_strong_classes();
193 }
194
195 {
196 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CLDGRoots, worker_i);
197 if (_process_strong_tasks.try_claim_task(G1RP_PS_ClassLoaderDataGraph_oops_do)) {
198 ClassLoaderDataGraph::roots_cld_do(closures->strong_clds(), closures->weak_clds());
199 }
200 }
201 }
202
203 void G1RootProcessor::process_vm_roots(G1RootClosures* closures,
204 G1GCPhaseTimes* phase_times,
205 uint worker_i) {
206 OopClosure* strong_roots = closures->strong_oops();
207
208 {
209 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::UniverseRoots, worker_i);
210 if (_process_strong_tasks.try_claim_task(G1RP_PS_Universe_oops_do)) {
211 Universe::oops_do(strong_roots);
212 }
213 }
214
215 {
216 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::JNIRoots, worker_i);
217 if (_process_strong_tasks.try_claim_task(G1RP_PS_JNIHandles_oops_do)) {
218 JNIHandles::oops_do(strong_roots);
219 }
220 }
221
222 {
223 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ObjectSynchronizerRoots, worker_i);
224 if (_process_strong_tasks.try_claim_task(G1RP_PS_ObjectSynchronizer_oops_do)) {
225 ObjectSynchronizer::oops_do(strong_roots);
226 }
227 }
228
229 {
230 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ManagementRoots, worker_i);
231 if (_process_strong_tasks.try_claim_task(G1RP_PS_Management_oops_do)) {
232 Management::oops_do(strong_roots);
233 }
234 }
235
236 {
237 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::JVMTIRoots, worker_i);
238 if (_process_strong_tasks.try_claim_task(G1RP_PS_jvmti_oops_do)) {
239 JvmtiExport::oops_do(strong_roots);
240 }
241 }
242
243 #if INCLUDE_AOT
244 if (UseAOT) {
245 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::AOTCodeRoots, worker_i);
246 if (_process_strong_tasks.try_claim_task(G1RP_PS_aot_oops_do)) {
247 AOTLoader::oops_do(strong_roots);
248 }
249 }
250 #endif
251
252 {
253 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::SystemDictionaryRoots, worker_i);
254 if (_process_strong_tasks.try_claim_task(G1RP_PS_SystemDictionary_oops_do)) {
255 SystemDictionary::oops_do(strong_roots);
256 }
257 }
258 }
259
260 void G1RootProcessor::process_code_cache_roots(CodeBlobClosure* code_closure,
261 G1GCPhaseTimes* phase_times,
262 uint worker_i) {
263 if (_process_strong_tasks.try_claim_task(G1RP_PS_CodeCache_oops_do)) {
264 CodeCache::blobs_do(code_closure);
265 }
266 }
267
// Number of worker threads participating in root processing, as recorded by
// the strong-roots scope (_srs) at construction time.
uint G1RootProcessor::n_workers() const {
  return _srs.n_threads();
}
|
57 }
58
59 void G1RootProcessor::wait_until_all_strong_classes_discovered() {
60 assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");
61
62 if ((uint)_n_workers_discovered_strong_classes != n_workers()) {
63 MonitorLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
64 while ((uint)_n_workers_discovered_strong_classes != n_workers()) {
65 ml.wait(0);
66 }
67 }
68 }
69
// Set up root processing for a collection performed by n_workers parallel
// worker threads. _lock is the barrier monitor used by the strong-class
// discovery handshake (see wait_until_all_strong_classes_discovered()), and
// _n_workers_discovered_strong_classes counts workers that have passed the
// strong nmethods phase.
G1RootProcessor::G1RootProcessor(G1CollectedHeap* g1h, uint n_workers) :
    _g1h(g1h),
    _process_strong_tasks(G1RP_PS_NumElements),
    _srs(n_workers),
    _lock(Mutex::leaf, "G1 Root Scan barrier lock", false, Monitor::_safepoint_check_never),
    _n_workers_discovered_strong_classes(0) {}
76
77 void G1RootProcessor::evacuate_roots(G1ParScanThreadState* pss, uint worker_id) {
78 G1GCPhaseTimes* phase_times = _g1h->phase_times();
79
80 G1EvacPhaseTimesTracker timer(phase_times, pss, G1GCPhaseTimes::ExtRootScan, worker_id);
81
82 G1EvacuationRootClosures* closures = pss->closures();
83 process_java_roots(closures, phase_times, worker_id, closures->trace_metadata() /* notify_claimed_roots_done */);
84
85 process_vm_roots(closures, phase_times, worker_id);
86
87 {
88 // Now the CM ref_processor roots.
89 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CMRefRoots, worker_id);
90 if (_process_strong_tasks.try_claim_task(G1RP_PS_refProcessor_oops_do)) {
91 // We need to treat the discovered reference lists of the
92 // concurrent mark ref processor as roots and keep entries
93 // (which are added by the marking threads) on them live
94 // until they can be processed at the end of marking.
95 _g1h->ref_processor_cm()->weak_oops_do(closures->strong_oops());
96 }
97 }
98
99 if (closures->trace_metadata()) {
100 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::WaitForStrongRoots, worker_id);
101 // Wait here to make sure all workers passed the strong nmethods phase.
102 wait_until_all_strong_classes_discovered();
103 }
104
105 _process_strong_tasks.all_tasks_completed(n_workers());
106 }
107
// Adaptor to pass the closures to the strong roots in the VM.
// Weak accessors return NULL: a strong-only walk has no weak processing to do.
// NOTE(review): this view appears to elide part of the class body (e.g. the
// strong_clds()/weak_codeblobs() overrides) — confirm against the full file.
class StrongRootsClosures : public G1RootClosures {
  OopClosure* _roots;
  CLDClosure* _clds;
  CodeBlobClosure* _blobs;
public:
  StrongRootsClosures(OopClosure* roots, CLDClosure* clds, CodeBlobClosure* blobs) :
      _roots(roots), _clds(clds), _blobs(blobs) {}

  OopClosure* weak_oops() { return NULL; }
  OopClosure* strong_oops() { return _roots; }

  CLDClosure* weak_clds() { return NULL; }
  // We don't want to visit code blobs more than once, so we return NULL for the
  // strong case and walk the entire code cache as a separate step.
  CodeBlobClosure* strong_codeblobs() { return NULL; }
};
158
159 void G1RootProcessor::process_all_roots(OopClosure* oops,
160 CLDClosure* clds,
161 CodeBlobClosure* blobs) {
162 AllRootsClosures closures(oops, clds);
163
164 process_java_roots(&closures, NULL, 0);
165 process_vm_roots(&closures, NULL, 0);
166
167 process_code_cache_roots(blobs, NULL, 0);
168
169 _process_strong_tasks.all_tasks_completed(n_workers());
170 }
171
172 void G1RootProcessor::process_java_roots(G1RootClosures* closures,
173 G1GCPhaseTimes* phase_times,
174 uint worker_id,
175 bool notify_claimed_roots_done) {
176 // Iterating over the the threads is done early to allow us to make sure that
177 // the "strong" nmethods are processed first using the strong closure. After a barrier,
178 // let the thread process the weak nmethods.
179 // The problem is that nmethods are claimed to avoid duplicate iteration. This is
180 // a way to make sure that for these nmethods we always apply the strong closure.
181 {
182 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ThreadRoots, worker_id);
183 bool is_par = n_workers() > 1;
184 Threads::possibly_parallel_oops_do(is_par,
185 closures->strong_oops(),
186 closures->strong_codeblobs());
187 }
188
189 // This is the point where this worker thread will not find more strong nmethods.
190 // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing.
191 if (notify_claimed_roots_done) {
192 worker_has_discovered_all_strong_classes();
193 }
194
195 {
196 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CLDGRoots, worker_id);
197 if (_process_strong_tasks.try_claim_task(G1RP_PS_ClassLoaderDataGraph_oops_do)) {
198 ClassLoaderDataGraph::roots_cld_do(closures->strong_clds(), closures->weak_clds());
199 }
200 }
201 }
202
203 void G1RootProcessor::process_vm_roots(G1RootClosures* closures,
204 G1GCPhaseTimes* phase_times,
205 uint worker_id) {
206 OopClosure* strong_roots = closures->strong_oops();
207
208 {
209 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::UniverseRoots, worker_id);
210 if (_process_strong_tasks.try_claim_task(G1RP_PS_Universe_oops_do)) {
211 Universe::oops_do(strong_roots);
212 }
213 }
214
215 {
216 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::JNIRoots, worker_id);
217 if (_process_strong_tasks.try_claim_task(G1RP_PS_JNIHandles_oops_do)) {
218 JNIHandles::oops_do(strong_roots);
219 }
220 }
221
222 {
223 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ObjectSynchronizerRoots, worker_id);
224 if (_process_strong_tasks.try_claim_task(G1RP_PS_ObjectSynchronizer_oops_do)) {
225 ObjectSynchronizer::oops_do(strong_roots);
226 }
227 }
228
229 {
230 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ManagementRoots, worker_id);
231 if (_process_strong_tasks.try_claim_task(G1RP_PS_Management_oops_do)) {
232 Management::oops_do(strong_roots);
233 }
234 }
235
236 {
237 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::JVMTIRoots, worker_id);
238 if (_process_strong_tasks.try_claim_task(G1RP_PS_jvmti_oops_do)) {
239 JvmtiExport::oops_do(strong_roots);
240 }
241 }
242
243 #if INCLUDE_AOT
244 if (UseAOT) {
245 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::AOTCodeRoots, worker_id);
246 if (_process_strong_tasks.try_claim_task(G1RP_PS_aot_oops_do)) {
247 AOTLoader::oops_do(strong_roots);
248 }
249 }
250 #endif
251
252 {
253 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::SystemDictionaryRoots, worker_id);
254 if (_process_strong_tasks.try_claim_task(G1RP_PS_SystemDictionary_oops_do)) {
255 SystemDictionary::oops_do(strong_roots);
256 }
257 }
258 }
259
260 void G1RootProcessor::process_code_cache_roots(CodeBlobClosure* code_closure,
261 G1GCPhaseTimes* phase_times,
262 uint worker_id) {
263 if (_process_strong_tasks.try_claim_task(G1RP_PS_CodeCache_oops_do)) {
264 CodeCache::blobs_do(code_closure);
265 }
266 }
267
// Number of worker threads participating in root processing, as recorded by
// the strong-roots scope (_srs) at construction time.
uint G1RootProcessor::n_workers() const {
  return _srs.n_threads();
}
|