208 }
209
210 template <typename ITR>
211 void ShenandoahRootScanner<ITR>::roots_do(uint worker_id, OopClosure* oops) {
212 CLDToOopClosure clds_cl(oops, ClassLoaderData::_claim_strong);
213 MarkingCodeBlobClosure blobs_cl(oops, !CodeBlobToOopClosure::FixRelocations);
214 roots_do(worker_id, oops, &clds_cl, &blobs_cl);
215 }
216
217 template <typename ITR>
218 void ShenandoahRootScanner<ITR>::strong_roots_do(uint worker_id, OopClosure* oops) {
219 CLDToOopClosure clds_cl(oops, ClassLoaderData::_claim_strong);
220 MarkingCodeBlobClosure blobs_cl(oops, !CodeBlobToOopClosure::FixRelocations);
221 strong_roots_do(worker_id, oops, &clds_cl, &blobs_cl);
222 }
223
// Full root scan for one worker: serial roots, JNI handles, class loader
// data (when `clds` is non-NULL), all threads, and -- unless code roots are
// scanned concurrently -- the code cache. `tc` optionally extends the
// per-thread visit.
// NOTE(review): this block is truncated in this view; the closing braces of
// the trailing `if` and of the function lie beyond the visible lines.
224 template <typename ITR>
225 void ShenandoahRootScanner<ITR>::roots_do(uint worker_id, OopClosure* oops, CLDClosure* clds, CodeBlobClosure* code, ThreadClosure *tc) {
// A NULL `clds` is only legal in traversal mode, per the assert in the
// else-branch below.
226 assert(!ShenandoahSafepoint::is_at_shenandoah_safepoint() ||
227 !ShenandoahHeap::heap()->unload_classes() ||
228 ShenandoahHeap::heap()->heuristics()->can_do_traversal_gc(),
229 "Expect class unloading or traversal when Shenandoah cycle is running");
// Thread closure that also applies `code` and `tc` while walking threads.
230 ShenandoahParallelOopsDoThreadClosure tc_cl(oops, code, tc);
231 ResourceMark rm;
232
233 _serial_roots.oops_do(oops, worker_id);
234 _jni_roots.oops_do(oops, worker_id);
235
236 if (clds != NULL) {
237 _cld_roots.cld_do(clds, worker_id);
238 } else {
239 assert(ShenandoahHeap::heap()->is_concurrent_traversal_in_progress(), "Only possible with traversal GC");
240 }
241
242 _thread_roots.threads_do(&tc_cl, worker_id);
243
244 // With ShenandoahConcurrentScanCodeRoots, we avoid scanning the entire code cache here,
245 // and instead do that in concurrent phase under the relevant lock. This saves init mark
246 // pause time.
247 if (code != NULL && !ShenandoahConcurrentScanCodeRoots) {
248 _code_roots.code_blobs_do(code, worker_id);
|
208 }
209
210 template <typename ITR>
211 void ShenandoahRootScanner<ITR>::roots_do(uint worker_id, OopClosure* oops) {
212 CLDToOopClosure clds_cl(oops, ClassLoaderData::_claim_strong);
213 MarkingCodeBlobClosure blobs_cl(oops, !CodeBlobToOopClosure::FixRelocations);
214 roots_do(worker_id, oops, &clds_cl, &blobs_cl);
215 }
216
217 template <typename ITR>
218 void ShenandoahRootScanner<ITR>::strong_roots_do(uint worker_id, OopClosure* oops) {
219 CLDToOopClosure clds_cl(oops, ClassLoaderData::_claim_strong);
220 MarkingCodeBlobClosure blobs_cl(oops, !CodeBlobToOopClosure::FixRelocations);
221 strong_roots_do(worker_id, oops, &clds_cl, &blobs_cl);
222 }
223
// Full root scan for one worker: serial roots, JNI handles, class loader
// data (when `clds` is non-NULL), all threads, and -- unless code roots are
// scanned concurrently -- the code cache. `tc` optionally extends the
// per-thread visit.
// NOTE(review): the assert here uses is_traversal_mode(), while the earlier
// copy of this function in this view uses heuristics()->can_do_traversal_gc();
// the two copies look like a before/after diff artifact -- confirm which
// revision is current.
// NOTE(review): block is truncated in this view; the closing braces of the
// trailing `if` and of the function lie beyond the visible lines.
224 template <typename ITR>
225 void ShenandoahRootScanner<ITR>::roots_do(uint worker_id, OopClosure* oops, CLDClosure* clds, CodeBlobClosure* code, ThreadClosure *tc) {
// A NULL `clds` is only legal in traversal mode, per the assert in the
// else-branch below.
226 assert(!ShenandoahSafepoint::is_at_shenandoah_safepoint() ||
227 !ShenandoahHeap::heap()->unload_classes() ||
228 ShenandoahHeap::heap()->is_traversal_mode(),
229 "Expect class unloading or traversal when Shenandoah cycle is running");
// Thread closure that also applies `code` and `tc` while walking threads.
230 ShenandoahParallelOopsDoThreadClosure tc_cl(oops, code, tc);
231 ResourceMark rm;
232
233 _serial_roots.oops_do(oops, worker_id);
234 _jni_roots.oops_do(oops, worker_id);
235
236 if (clds != NULL) {
237 _cld_roots.cld_do(clds, worker_id);
238 } else {
239 assert(ShenandoahHeap::heap()->is_concurrent_traversal_in_progress(), "Only possible with traversal GC");
240 }
241
242 _thread_roots.threads_do(&tc_cl, worker_id);
243
244 // With ShenandoahConcurrentScanCodeRoots, we avoid scanning the entire code cache here,
245 // and instead do that in concurrent phase under the relevant lock. This saves init mark
246 // pause time.
247 if (code != NULL && !ShenandoahConcurrentScanCodeRoots) {
248 _code_roots.code_blobs_do(code, worker_id);
|