83 }
84
85 template <typename ITR>
86 void ShenandoahRootScanner<ITR>::strong_roots_do(uint worker_id, OopClosure* oops) {
87 CLDToOopClosure clds_cl(oops, ClassLoaderData::_claim_strong);
88 MarkingCodeBlobClosure blobs_cl(oops, !CodeBlobToOopClosure::FixRelocations);
89 strong_roots_do(worker_id, oops, &clds_cl, &blobs_cl);
90 }
91
template <typename ITR>
void ShenandoahRootScanner<ITR>::roots_do(uint worker_id, OopClosure* oops, CLDClosure* clds, CodeBlobClosure* code, ThreadClosure *tc) {
  // Scan all root groups with the given closures. Per the assert below, at a
  // Shenandoah safepoint this is only legal when class unloading is off or
  // the heuristics allow traversal GC; otherwise strong_roots_do() applies.
  assert(!ShenandoahSafepoint::is_at_shenandoah_safepoint() ||
         !ShenandoahHeap::heap()->unload_classes() ||
         ShenandoahHeap::heap()->heuristics()->can_do_traversal_gc(),
         "Expect class unloading or traversal when Shenandoah cycle is running");
  ShenandoahParallelOopsDoThreadClosure tc_cl(oops, code, tc);
  ResourceMark rm;

  _serial_roots.oops_do(oops, worker_id);
  _jni_roots.oops_do(oops, worker_id);
  // Same closure applied as both the strong and the weak CLD closure.
  _cld_roots.clds_do(clds, clds, worker_id);
  _thread_roots.threads_do(&tc_cl, worker_id);

  // With ShenandoahConcurrentScanCodeRoots, we avoid scanning the entire code cache here,
  // and instead do that in concurrent phase under the relevant lock. This saves init mark
  // pause time.
  if (code != NULL && !ShenandoahConcurrentScanCodeRoots) {
    _code_roots.code_blobs_do(code, worker_id);
  }
}
113
114 template <typename ITR>
115 void ShenandoahRootScanner<ITR>::roots_do_unchecked(OopClosure* oops) {
116 CLDToOopClosure clds(oops, ClassLoaderData::_claim_strong);
117 MarkingCodeBlobClosure code(oops, !CodeBlobToOopClosure::FixRelocations);
118 ShenandoahParallelOopsDoThreadClosure tc_cl(oops, &code, NULL);
119 ResourceMark rm;
120
121 _serial_roots.oops_do(oops, 0);
122 _jni_roots.oops_do(oops, 0);
123 _cld_roots.clds_do(&clds, &clds, 0);
124 _thread_roots.threads_do(&tc_cl, 0);
125 _code_roots.code_blobs_do(&code, 0);
126 }
127
128 template <typename ITR>
129 void ShenandoahRootScanner<ITR>::strong_roots_do_unchecked(OopClosure* oops) {
130 CLDToOopClosure clds(oops, ClassLoaderData::_claim_strong);
131 MarkingCodeBlobClosure code(oops, !CodeBlobToOopClosure::FixRelocations);
132 ShenandoahParallelOopsDoThreadClosure tc_cl(oops, &code, NULL);
133 ResourceMark rm;
134
135 _serial_roots.oops_do(oops, 0);
136 _jni_roots.oops_do(oops, 0);
137 _cld_roots.clds_do(&clds, NULL, 0);
138 _thread_roots.threads_do(&tc_cl, 0);
139 }
140
template <typename ITR>
void ShenandoahRootScanner<ITR>::strong_roots_do(uint worker_id, OopClosure* oops, CLDClosure* clds, CodeBlobClosure* code, ThreadClosure* tc) {
  // Scan strong roots only; per the assert, used only while class unloading
  // is in effect.
  assert(ShenandoahHeap::heap()->unload_classes(), "Should be used during class unloading");
  ShenandoahParallelOopsDoThreadClosure tc_cl(oops, code, tc);
  ResourceMark rm;

  _serial_roots.oops_do(oops, worker_id);
  _jni_roots.oops_do(oops, worker_id);
  // NULL weak-CLD closure: weak CLD roots are skipped here.
  _cld_roots.clds_do(clds, NULL, worker_id);
  _thread_roots.threads_do(&tc_cl, worker_id);
}
152
template <typename IsAlive, typename KeepAlive>
void ShenandoahRootUpdater::roots_do(uint worker_id, IsAlive* is_alive, KeepAlive* keep_alive) {
  // Update roots: strong root groups are visited with keep_alive directly,
  // while weak and dedup roots are filtered through is_alive first.
  CodeBlobToOopClosure update_blobs(keep_alive, CodeBlobToOopClosure::FixRelocations);
  CLDToOopClosure clds(keep_alive, ClassLoaderData::_claim_strong);
  // When classes are being unloaded, weak CLD roots are not visited here.
  CLDToOopClosure* weak_clds = ShenandoahHeap::heap()->unload_classes() ? NULL : &clds;

  _serial_roots.oops_do(keep_alive, worker_id);
  _jni_roots.oops_do(keep_alive, worker_id);

  _thread_roots.oops_do(keep_alive, NULL, worker_id);
  _cld_roots.clds_do(&clds, weak_clds, worker_id);

  // Code cache update is optional (controlled by the updater's flag).
  if(_update_code_cache) {
    _code_roots.code_blobs_do(&update_blobs, worker_id);
  }

  _weak_roots.oops_do<IsAlive, KeepAlive>(is_alive, keep_alive, worker_id);
  _dedup_roots.oops_do(is_alive, keep_alive, worker_id);
}
172
173 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHROOTPROCESSOR_INLINE_HPP
|
83 }
84
template <typename ITR>
void ShenandoahRootScanner<ITR>::strong_roots_do(uint worker_id, OopClosure* oops) {
  // Convenience overload: build CLD and code-blob closures from the oop
  // closure and forward to the full-argument strong_roots_do().
  CLDToOopClosure clds_cl(oops, ClassLoaderData::_claim_strong);
  MarkingCodeBlobClosure blobs_cl(oops, !CodeBlobToOopClosure::FixRelocations);
  strong_roots_do(worker_id, oops, &clds_cl, &blobs_cl);
}
91
template <typename ITR>
void ShenandoahRootScanner<ITR>::roots_do(uint worker_id, OopClosure* oops, CLDClosure* clds, CodeBlobClosure* code, ThreadClosure *tc) {
  // Scan all root groups with the given closures. Per the assert below, at a
  // Shenandoah safepoint this is only legal when class unloading is off or
  // the heuristics allow traversal GC; otherwise strong_roots_do() applies.
  assert(!ShenandoahSafepoint::is_at_shenandoah_safepoint() ||
         !ShenandoahHeap::heap()->unload_classes() ||
         ShenandoahHeap::heap()->heuristics()->can_do_traversal_gc(),
         "Expect class unloading or traversal when Shenandoah cycle is running");
  ShenandoahParallelOopsDoThreadClosure tc_cl(oops, code, tc);
  ResourceMark rm;

  _serial_roots.oops_do(oops, worker_id);
  _jni_roots.oops_do(oops, worker_id);

  if (clds != NULL) {
    _cld_roots.cld_do(clds, worker_id);
  } else {
    // Per the assert, a NULL CLD closure only occurs during concurrent
    // traversal; CLD roots are skipped here in that case.
    assert(ShenandoahHeap::heap()->is_concurrent_traversal_in_progress(), "Only possible with traversal GC");
  }

  _thread_roots.threads_do(&tc_cl, worker_id);

  // With ShenandoahConcurrentScanCodeRoots, we avoid scanning the entire code cache here,
  // and instead do that in concurrent phase under the relevant lock. This saves init mark
  // pause time.
  if (code != NULL && !ShenandoahConcurrentScanCodeRoots) {
    _code_roots.code_blobs_do(code, worker_id);
  }
}
119
120 template <typename ITR>
121 void ShenandoahRootScanner<ITR>::roots_do_unchecked(OopClosure* oops) {
122 CLDToOopClosure clds(oops, ClassLoaderData::_claim_strong);
123 MarkingCodeBlobClosure code(oops, !CodeBlobToOopClosure::FixRelocations);
124 ShenandoahParallelOopsDoThreadClosure tc_cl(oops, &code, NULL);
125 ResourceMark rm;
126
127 _serial_roots.oops_do(oops, 0);
128 _jni_roots.oops_do(oops, 0);
129 _cld_roots.cld_do(&clds, 0);
130 _thread_roots.threads_do(&tc_cl, 0);
131 _code_roots.code_blobs_do(&code, 0);
132 }
133
template <typename ITR>
void ShenandoahRootScanner<ITR>::strong_roots_do_unchecked(OopClosure* oops) {
  // Single-threaded (worker 0) scan of strong roots only: only always-strong
  // CLDs are visited, and the code cache is not walked directly.
  CLDToOopClosure clds(oops, ClassLoaderData::_claim_strong);
  MarkingCodeBlobClosure code(oops, !CodeBlobToOopClosure::FixRelocations);
  ShenandoahParallelOopsDoThreadClosure tc_cl(oops, &code, NULL);
  ResourceMark rm;

  _serial_roots.oops_do(oops, 0);
  _jni_roots.oops_do(oops, 0);
  _cld_roots.always_strong_cld_do(&clds, 0);
  _thread_roots.threads_do(&tc_cl, 0);
}
146
147 template <typename ITR>
148 void ShenandoahRootScanner<ITR>::strong_roots_do(uint worker_id, OopClosure* oops, CLDClosure* clds, CodeBlobClosure* code, ThreadClosure* tc) {
149 assert(ShenandoahHeap::heap()->unload_classes(), "Should be used during class unloading");
150 ShenandoahParallelOopsDoThreadClosure tc_cl(oops, code, tc);
151 ResourceMark rm;
152
153 _serial_roots.oops_do(oops, worker_id);
154 _jni_roots.oops_do(oops, worker_id);
155 _cld_roots.always_strong_cld_do(clds, worker_id);
156 _thread_roots.threads_do(&tc_cl, worker_id);
157 }
158
159 template <typename IsAlive, typename KeepAlive>
160 void ShenandoahRootUpdater::roots_do(uint worker_id, IsAlive* is_alive, KeepAlive* keep_alive) {
161 CodeBlobToOopClosure update_blobs(keep_alive, CodeBlobToOopClosure::FixRelocations);
162 CLDToOopClosure clds(keep_alive, ClassLoaderData::_claim_strong);
163
164 _serial_roots.oops_do(keep_alive, worker_id);
165 _jni_roots.oops_do(keep_alive, worker_id);
166
167 _thread_roots.oops_do(keep_alive, NULL, worker_id);
168 _cld_roots.cld_do(&clds, worker_id);
169
170 if(_update_code_cache) {
171 _code_roots.code_blobs_do(&update_blobs, worker_id);
172 }
173
174 _weak_roots.oops_do<IsAlive, KeepAlive>(is_alive, keep_alive, worker_id);
175 _dedup_roots.oops_do(is_alive, keep_alive, worker_id);
176 }
177
178 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHROOTPROCESSOR_INLINE_HPP
|