162 _serial_roots(phase),
163 _vm_roots(phase),
164 _cld_roots(phase),
165 _thread_roots(phase, n_workers > 1),
166 _serial_weak_roots(phase),
167 _weak_roots(phase),
168 _dedup_roots(phase),
169 _code_roots(phase),
170 _stw_roots_processing(stw_roots_processing),
171 _stw_class_unloading(stw_class_unloading) {
172 }
173
// Visit all GC roots once for evacuation, feeding every root oop to 'oops'.
// worker_id selects this worker's share of the parallel root partitions.
// Code blobs are visited either with a plain fix-relocations marking closure
// or, when concurrent class unloading is possible, with a closure that also
// "disarms" them -- presumably the nmethod entry barrier; confirm in
// ShenandoahCodeBlobAndDisarmClosure.
174 void ShenandoahRootEvacuator::roots_do(uint worker_id, OopClosure* oops) {
175 MarkingCodeBlobClosure blobsCl(oops, CodeBlobToOopClosure::FixRelocations);
176 ShenandoahCodeBlobAndDisarmClosure blobs_and_disarm_Cl(oops);
// Select the code blob closure: the disarming variant only when concurrent
// class unloading can happen.
177 CodeBlobToOopClosure* codes_cl = ShenandoahConcurrentRoots::can_do_concurrent_class_unloading() ?
178 static_cast<CodeBlobToOopClosure*>(&blobs_and_disarm_Cl) :
179 static_cast<CodeBlobToOopClosure*>(&blobsCl);
180 AlwaysTrueClosure always_true;
181
// Serial(-claiming) strong and weak roots are processed unconditionally.
182 _serial_roots.oops_do(oops, worker_id);
183 _serial_weak_roots.weak_oops_do(oops, worker_id);
// VM and weak roots are scanned here only when roots processing happens at
// a stop-the-world pause; otherwise they are presumably handled concurrently
// elsewhere -- confirm against the callers that set _stw_roots_processing.
184 if (_stw_roots_processing) {
185 _vm_roots.oops_do<OopClosure>(oops, worker_id);
186 _weak_roots.oops_do<OopClosure>(oops, worker_id);
187 }
188
// When class unloading happens at the pause: scan CLDs (strong claim) and
// code blobs explicitly, and walk thread stacks without a code closure.
// Otherwise, code blobs reachable from thread stacks are visited inline
// during the thread walk.
189 if (_stw_class_unloading) {
190 CLDToOopClosure clds(oops, ClassLoaderData::_claim_strong);
191 _cld_roots.cld_do(&clds, worker_id);
192 _code_roots.code_blobs_do(codes_cl, worker_id);
193 _thread_roots.oops_do(oops, NULL, worker_id);
194 } else {
195 _thread_roots.oops_do(oops, codes_cl, worker_id);
196 }
197
// String dedup roots; AlwaysTrueClosure is passed as the is-alive predicate,
// so all entries are treated as live (assumed -- verify the dedup oops_do
// contract).
198 _dedup_roots.oops_do(&always_true, oops, worker_id);
199 }
200
// Binds every root-set handle to the same timing phase. Thread roots are
// flagged as parallel-capable only when more than one worker will run.
201 ShenandoahRootUpdater::ShenandoahRootUpdater(uint n_workers, ShenandoahPhaseTimings::Phase phase) :
202 ShenandoahRootProcessor(phase),
203 _serial_roots(phase),
204 _vm_roots(phase),
205 _cld_roots(phase),
206 _thread_roots(phase, n_workers > 1),
207 _serial_weak_roots(phase),
208 _weak_roots(phase),
209 _dedup_roots(phase),
210 _code_roots(phase) {
211 }
212
// Binds every root-set handle to the same timing phase; thread roots are
// parallel-capable only with more than one worker. Pointer adjustment is a
// full-GC-only operation, enforced by the assert below.
213 ShenandoahRootAdjuster::ShenandoahRootAdjuster(uint n_workers, ShenandoahPhaseTimings::Phase phase) :
214 ShenandoahRootProcessor(phase),
215 _serial_roots(phase),
216 _vm_roots(phase),
217 _cld_roots(phase),
218 _thread_roots(phase, n_workers > 1),
219 _serial_weak_roots(phase),
220 _weak_roots(phase),
221 _dedup_roots(phase),
222 _code_roots(phase) {
223 assert(ShenandoahHeap::heap()->is_full_gc_in_progress(), "Full GC only");
224 }
225
// Visit all roots unconditionally to adjust pointers (full GC only -- the
// constructor asserts is_full_gc_in_progress()). Every root oop is fed to
// 'oops'; code blobs additionally get their relocations fixed, and are
// disarmed as well when concurrent class unloading is possible.
226 void ShenandoahRootAdjuster::roots_do(uint worker_id, OopClosure* oops) {
227 CodeBlobToOopClosure code_blob_cl(oops, CodeBlobToOopClosure::FixRelocations);
228 ShenandoahCodeBlobAndDisarmClosure blobs_and_disarm_Cl(oops);
// Select the code blob closure: disarming variant only under concurrent
// class unloading.
229 CodeBlobToOopClosure* adjust_code_closure = ShenandoahConcurrentRoots::can_do_concurrent_class_unloading() ?
230 static_cast<CodeBlobToOopClosure*>(&blobs_and_disarm_Cl) :
231 static_cast<CodeBlobToOopClosure*>(&code_blob_cl);
232 CLDToOopClosure adjust_cld_closure(oops, ClassLoaderData::_claim_strong);
233 AlwaysTrueClosure always_true;
234
235 _serial_roots.oops_do(oops, worker_id);
236 _vm_roots.oops_do(oops, worker_id);
237
// Thread stacks are walked without a code closure here; code blobs are
// covered by the explicit _code_roots pass below.
238 _thread_roots.oops_do(oops, NULL, worker_id);
239 _cld_roots.cld_do(&adjust_cld_closure, worker_id);
240 _code_roots.code_blobs_do(adjust_code_closure, worker_id);
241
// Weak and dedup roots: AlwaysTrueClosure keeps all dedup entries live
// (assumed -- verify the dedup oops_do contract).
242 _serial_weak_roots.weak_oops_do(oops, worker_id);
243 _weak_roots.oops_do<OopClosure>(oops, worker_id);
244 _dedup_roots.oops_do(&always_true, oops, worker_id);
245 }
246
// All root sets are timed under the single heap_iteration_roots phase.
// Thread roots are explicitly non-parallel: heap iteration is performed by
// one thread (see the VM-thread assert in roots_do).
247 ShenandoahHeapIterationRootScanner::ShenandoahHeapIterationRootScanner() :
248 ShenandoahRootProcessor(ShenandoahPhaseTimings::heap_iteration_roots),
249 _serial_roots(ShenandoahPhaseTimings::heap_iteration_roots),
250 _thread_roots(ShenandoahPhaseTimings::heap_iteration_roots, false /*is par*/),
251 _vm_roots(ShenandoahPhaseTimings::heap_iteration_roots),
252 _cld_roots(ShenandoahPhaseTimings::heap_iteration_roots),
253 _serial_weak_roots(ShenandoahPhaseTimings::heap_iteration_roots),
254 _weak_roots(ShenandoahPhaseTimings::heap_iteration_roots),
255 _dedup_roots(ShenandoahPhaseTimings::heap_iteration_roots),
256 _code_roots(ShenandoahPhaseTimings::heap_iteration_roots) {
257 }
258
// Single-threaded root scan used for heap iteration; must run on the VM
// thread, and always uses worker id 0. Code blobs are scanned without
// fixing relocations (note the negated FixRelocations flag).
259 void ShenandoahHeapIterationRootScanner::roots_do(OopClosure* oops) {
260 assert(Thread::current()->is_VM_thread(), "Only by VM thread");
261 // Must use _claim_none to avoid interfering with concurrent CLDG iteration
262 CLDToOopClosure clds(oops, ClassLoaderData::_claim_none);
263 MarkingCodeBlobClosure code(oops, !CodeBlobToOopClosure::FixRelocations);
264 ShenandoahParallelOopsDoThreadClosure tc_cl(oops, &code, NULL);
265 AlwaysTrueClosure always_true;
// ResourceMark scopes any resource-area allocations made during the walk.
266 ResourceMark rm;
267
268 _serial_roots.oops_do(oops, 0);
269 _vm_roots.oops_do(oops, 0);
270 _cld_roots.cld_do(&clds, 0);
271 _thread_roots.threads_do(&tc_cl, 0);
272 _code_roots.code_blobs_do(&code, 0);
273
// Weak and dedup roots; AlwaysTrueClosure reports every dedup entry as
// live (assumed -- verify the dedup oops_do contract).
274 _serial_weak_roots.weak_oops_do(oops, 0);
275 _weak_roots.oops_do<OopClosure>(oops, 0);
276 _dedup_roots.oops_do(&always_true, oops, 0);
277 }
|
162 _serial_roots(phase),
163 _vm_roots(phase),
164 _cld_roots(phase),
165 _thread_roots(phase, n_workers > 1),
166 _serial_weak_roots(phase),
167 _weak_roots(phase),
168 _dedup_roots(phase),
169 _code_roots(phase),
170 _stw_roots_processing(stw_roots_processing),
171 _stw_class_unloading(stw_class_unloading) {
172 }
173
// Visit all GC roots once for evacuation, feeding every root oop to 'oops'.
// worker_id selects this worker's share of the parallel root partitions.
// Roots are ordered serial -> light parallel -> heavy parallel so that
// cheap serial-claiming work is drained before the bulk parallel phases.
174 void ShenandoahRootEvacuator::roots_do(uint worker_id, OopClosure* oops) {
175 MarkingCodeBlobClosure blobsCl(oops, CodeBlobToOopClosure::FixRelocations);
176 ShenandoahCodeBlobAndDisarmClosure blobs_and_disarm_Cl(oops);
// Select the code blob closure: the disarming variant (presumably the
// nmethod entry barrier -- confirm in ShenandoahCodeBlobAndDisarmClosure)
// only when concurrent class unloading can happen.
177 CodeBlobToOopClosure* codes_cl = ShenandoahConcurrentRoots::can_do_concurrent_class_unloading() ?
178 static_cast<CodeBlobToOopClosure*>(&blobs_and_disarm_Cl) :
179 static_cast<CodeBlobToOopClosure*>(&blobsCl);
180 AlwaysTrueClosure always_true;
181
182 // Process serial-claiming roots first
183 _serial_roots.oops_do(oops, worker_id);
184 _serial_weak_roots.weak_oops_do(oops, worker_id);
185
186 // Process light-weight/limited parallel roots next. VM and weak roots
// are scanned here only when roots processing happens at a stop-the-world
// pause (_stw_roots_processing); otherwise they are presumably handled
// concurrently elsewhere -- confirm against the callers setting the flag.
187 if (_stw_roots_processing) {
188 _vm_roots.oops_do<OopClosure>(oops, worker_id);
189 _weak_roots.oops_do<OopClosure>(oops, worker_id);
190 }
// Dedup roots; AlwaysTrueClosure keeps all entries live (assumed --
// verify the dedup oops_do contract).
191 _dedup_roots.oops_do(&always_true, oops, worker_id);
192
193 // Process heavy-weight/fully parallel roots last. Under STW class
// unloading, CLDs (strong claim) and code blobs are scanned explicitly and
// threads are walked without a code closure; otherwise code blobs reached
// from thread stacks are visited inline during the thread walk.
194 if (_stw_class_unloading) {
195 CLDToOopClosure clds(oops, ClassLoaderData::_claim_strong);
196 _cld_roots.cld_do(&clds, worker_id);
197 _code_roots.code_blobs_do(codes_cl, worker_id);
198 _thread_roots.oops_do(oops, NULL, worker_id);
199 } else {
200 _thread_roots.oops_do(oops, codes_cl, worker_id);
201 }
202 }
203
// Binds every root-set handle to the same timing phase. Thread roots are
// flagged as parallel-capable only when more than one worker will run.
204 ShenandoahRootUpdater::ShenandoahRootUpdater(uint n_workers, ShenandoahPhaseTimings::Phase phase) :
205 ShenandoahRootProcessor(phase),
206 _serial_roots(phase),
207 _vm_roots(phase),
208 _cld_roots(phase),
209 _thread_roots(phase, n_workers > 1),
210 _serial_weak_roots(phase),
211 _weak_roots(phase),
212 _dedup_roots(phase),
213 _code_roots(phase) {
214 }
215
// Binds every root-set handle to the same timing phase; thread roots are
// parallel-capable only with more than one worker. Pointer adjustment is a
// full-GC-only operation, enforced by the assert below.
216 ShenandoahRootAdjuster::ShenandoahRootAdjuster(uint n_workers, ShenandoahPhaseTimings::Phase phase) :
217 ShenandoahRootProcessor(phase),
218 _serial_roots(phase),
219 _vm_roots(phase),
220 _cld_roots(phase),
221 _thread_roots(phase, n_workers > 1),
222 _serial_weak_roots(phase),
223 _weak_roots(phase),
224 _dedup_roots(phase),
225 _code_roots(phase) {
226 assert(ShenandoahHeap::heap()->is_full_gc_in_progress(), "Full GC only");
227 }
228
// Visit all roots unconditionally to adjust pointers (full GC only -- the
// constructor asserts is_full_gc_in_progress()). Every root oop is fed to
// 'oops'; code blobs additionally get relocations fixed, and are disarmed
// as well when concurrent class unloading is possible. Roots are ordered
// serial -> light parallel -> heavy parallel.
229 void ShenandoahRootAdjuster::roots_do(uint worker_id, OopClosure* oops) {
230 CodeBlobToOopClosure code_blob_cl(oops, CodeBlobToOopClosure::FixRelocations);
231 ShenandoahCodeBlobAndDisarmClosure blobs_and_disarm_Cl(oops);
// Select the code blob closure: disarming variant only under concurrent
// class unloading.
232 CodeBlobToOopClosure* adjust_code_closure = ShenandoahConcurrentRoots::can_do_concurrent_class_unloading() ?
233 static_cast<CodeBlobToOopClosure*>(&blobs_and_disarm_Cl) :
234 static_cast<CodeBlobToOopClosure*>(&code_blob_cl);
235 CLDToOopClosure adjust_cld_closure(oops, ClassLoaderData::_claim_strong);
236 AlwaysTrueClosure always_true;
237
238 // Process serial-claiming roots first
239 _serial_roots.oops_do(oops, worker_id);
240 _serial_weak_roots.weak_oops_do(oops, worker_id);
241
242 // Process light-weight/limited parallel roots next. AlwaysTrueClosure
// keeps all dedup entries live (assumed -- verify the dedup contract).
243 _vm_roots.oops_do(oops, worker_id);
244 _weak_roots.oops_do<OopClosure>(oops, worker_id);
245 _dedup_roots.oops_do(&always_true, oops, worker_id);
246
247 // Process heavy-weight/fully parallel roots last. Threads are walked
// without a code closure; code blobs are covered by the explicit
// _code_roots pass above it.
248 _cld_roots.cld_do(&adjust_cld_closure, worker_id);
249 _code_roots.code_blobs_do(adjust_code_closure, worker_id);
250 _thread_roots.oops_do(oops, NULL, worker_id);
251 }
252
// All root sets are timed under the single heap_iteration_roots phase.
// Thread roots are explicitly non-parallel: heap iteration is performed by
// one thread (see the VM-thread assert in roots_do).
253 ShenandoahHeapIterationRootScanner::ShenandoahHeapIterationRootScanner() :
254 ShenandoahRootProcessor(ShenandoahPhaseTimings::heap_iteration_roots),
255 _serial_roots(ShenandoahPhaseTimings::heap_iteration_roots),
256 _thread_roots(ShenandoahPhaseTimings::heap_iteration_roots, false /*is par*/),
257 _vm_roots(ShenandoahPhaseTimings::heap_iteration_roots),
258 _cld_roots(ShenandoahPhaseTimings::heap_iteration_roots),
259 _serial_weak_roots(ShenandoahPhaseTimings::heap_iteration_roots),
260 _weak_roots(ShenandoahPhaseTimings::heap_iteration_roots),
261 _dedup_roots(ShenandoahPhaseTimings::heap_iteration_roots),
262 _code_roots(ShenandoahPhaseTimings::heap_iteration_roots) {
263 }
264
// Single-threaded root scan used for heap iteration; must run on the VM
// thread, and always uses worker id 0. Code blobs are scanned without
// fixing relocations (note the negated FixRelocations flag). Roots are
// ordered serial -> light parallel -> heavy parallel.
265 void ShenandoahHeapIterationRootScanner::roots_do(OopClosure* oops) {
266 assert(Thread::current()->is_VM_thread(), "Only by VM thread");
267 // Must use _claim_none to avoid interfering with concurrent CLDG iteration
268 CLDToOopClosure clds(oops, ClassLoaderData::_claim_none);
269 MarkingCodeBlobClosure code(oops, !CodeBlobToOopClosure::FixRelocations);
270 ShenandoahParallelOopsDoThreadClosure tc_cl(oops, &code, NULL);
271 AlwaysTrueClosure always_true;
272
// ResourceMark scopes any resource-area allocations made during the walk.
273 ResourceMark rm;
274
275 // Process serial-claiming roots first
276 _serial_roots.oops_do(oops, 0);
277 _serial_weak_roots.weak_oops_do(oops, 0);
278
279 // Process light-weight/limited parallel roots next. AlwaysTrueClosure
// reports every dedup entry as live (assumed -- verify the dedup contract).
280 _vm_roots.oops_do(oops, 0);
281 _weak_roots.oops_do<OopClosure>(oops, 0);
282 _dedup_roots.oops_do(&always_true, oops, 0);
283
284 // Process heavy-weight/fully parallel roots last
285 _cld_roots.cld_do(&clds, 0);
286 _code_roots.code_blobs_do(&code, 0);
287 _thread_roots.threads_do(&tc_cl, 0);
288 }
|