~ShenandoahDisarmNMethodsTask() {
  // Finish the concurrent nmethod iteration started in the constructor.
  // The iterator bookkeeping is guarded by CodeCache_lock; taken without a
  // safepoint check since this runs on a GC thread.
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  _iterator.nmethods_do_end();
}
207
virtual void work(uint worker_id) {
  // Establish the per-worker session first, then pull nmethods from the
  // shared concurrent iterator and apply the disarm closure to each.
  ShenandoahParallelWorkerSession worker_session(worker_id);
  _iterator.nmethods_do(&_cl);
}
212 };
213
214 void ShenandoahCodeRoots::disarm_nmethods() {
215 ShenandoahDisarmNMethodsTask task;
216 ShenandoahHeap::heap()->workers()->run_task(&task);
217 }
218
// Applied to every registered nmethod during the concurrent unlink phase.
// Unloading nmethods are unlinked from their Method; live nmethods get their
// oops healed, their entry barrier disarmed, and their IC/exception caches
// cleared. Clearing caches can fail when the transitional IC stub buffer is
// exhausted; that failure is latched in _failed and the whole pass is retried
// by the caller (see ShenandoahCodeRoots::unlink below).
class ShenandoahNMethodUnlinkClosure : public NMethodClosure {
private:
  bool _unloading_occurred;       // forwarded to unload_nmethod_caches()
  volatile bool _failed;          // latched failure flag, shared across workers (Atomic access)
  ShenandoahHeap* const _heap;
  BarrierSetNMethod* const _bs;

  // Latch failure; other workers poll failed() and bail out early.
  void set_failed() {
    Atomic::store(&_failed, true);
  }

  // Detach an unloading nmethod from its Method and dependency context.
  void unlink(nmethod* nm) {
    // Unlinking of the dependencies must happen before the
    // handshake separating unlink and purge.
    nm->flush_dependencies(false /* delete_immediately */);

    // unlink_from_method will take the CompiledMethod_lock.
    // In this case we don't strictly need it when unlinking nmethods from
    // the Method, because it is only concurrently unlinked by
    // the entry barrier, which acquires the per nmethod lock.
    nm->unlink_from_method();

    if (nm->is_osr_method()) {
      // Invalidate the osr nmethod only once
      nm->invalidate_osr_method();
    }
  }
public:
  ShenandoahNMethodUnlinkClosure(bool unloading_occurred) :
      _unloading_occurred(unloading_occurred),
      _failed(false),
      _heap(ShenandoahHeap::heap()),
      _bs(ShenandoahBarrierSet::barrier_set()->barrier_set_nmethod()) {}

  virtual void do_nmethod(nmethod* nm) {
    assert(_heap->is_concurrent_weak_root_in_progress(), "Only this phase");
    // Another worker already failed; no point doing more work this pass.
    if (failed()) {
      return;
    }

    ShenandoahNMethod* nm_data = ShenandoahNMethod::gc_data(nm);
    assert(!nm_data->is_unregistered(), "Should not see unregistered entry");

    if (!nm->is_alive()) {
      return;
    }

    if (nm->is_unloading()) {
      // Hold the per-nmethod lock while unlinking so we do not race the
      // entry barrier operating on the same nmethod.
      ShenandoahReentrantLocker locker(nm_data->lock());
      unlink(nm);
      return;
    }

    ShenandoahReentrantLocker locker(nm_data->lock());

    // Heal oops and disarm
    if (_bs->is_armed(nm)) {
      ShenandoahNMethod::heal_nmethod_metadata(nm_data);
      _bs->disarm(nm);
    }

    // Clear compiled ICs and exception caches. A false return indicates we
    // ran out of transitional IC stubs; latch failure so the caller refills
    // the buffer and retries.
    if (!nm->unload_nmethod_caches(_unloading_occurred)) {
      set_failed();
    }
  }

  bool failed() const {
    return Atomic::load(&_failed);
  }
};
290
// Gang task that drives ShenandoahNMethodUnlinkClosure over all registered
// nmethods via the shared concurrent iterator. The iterator's begin/end
// bookkeeping is done under CodeCache_lock in the constructor/destructor, so
// one task object brackets exactly one iteration pass.
class ShenandoahUnlinkTask : public AbstractGangTask {
private:
  ShenandoahNMethodUnlinkClosure _cl;
  ICRefillVerifier* _verifier;    // shared verifier; each worker registers via ICRefillVerifierMark
  ShenandoahConcurrentNMethodIterator _iterator;

public:
  ShenandoahUnlinkTask(bool unloading_occurred, ICRefillVerifier* verifier) :
    AbstractGangTask("ShenandoahNMethodUnlinkTask"),
    _cl(unloading_occurred),
    _verifier(verifier),
    _iterator(ShenandoahCodeRoots::table()) {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_begin();
  }

  ~ShenandoahUnlinkTask() {
    // Close the iteration under the same lock that opened it.
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_end();
  }

  virtual void work(uint worker_id) {
    ICRefillVerifierMark mark(_verifier);
    // Evac-OOM scope: healing nmethod oops may evacuate objects — presumably
    // required by heal_nmethod_metadata; confirm against its implementation.
    ShenandoahEvacOOMScope evac_scope;
    _iterator.nmethods_do(&_cl);
  }

  // True when no worker latched a failure during the pass.
  bool success() const {
    return !_cl.failed();
  }
};
322
// Concurrently unlink all unloading nmethods. Retries the whole pass until it
// succeeds: the only failure mode in this variant is running out of
// transitional IC stubs, which is cured by refilling the buffer.
void ShenandoahCodeRoots::unlink(WorkGang* workers, bool unloading_occurred) {
  assert(ShenandoahConcurrentRoots::should_do_concurrent_class_unloading(),
         "Only when running concurrent class unloading");

  for (;;) {
    ICRefillVerifier verifier;

    // Inner scope: the task destructor must run (closing the concurrent
    // iterator under CodeCache_lock) before we refill IC stubs below.
    {
      ShenandoahUnlinkTask task(unloading_occurred, &verifier);
      workers->run_task(&task);
      if (task.success()) {
        return;
      }
    }

    // Cleaning failed because we ran out of transitional IC stubs,
    // so we have to refill and try again. Refilling requires taking
    // a safepoint, so we temporarily leave the suspendible thread set.
    SuspendibleThreadSetLeaver sts;
    InlineCacheBuffer::refill_ic_stubs();
  }
}
345
346 class ShenandoahNMethodPurgeClosure : public NMethodClosure {
347 public:
348 virtual void do_nmethod(nmethod* nm) {
349 if (nm->is_alive() && nm->is_unloading()) {
350 nm->make_unloaded();
351 }
352 }
353 };
354
355 class ShenandoahNMethodPurgeTask : public AbstractGangTask {
356 private:
357 ShenandoahNMethodPurgeClosure _cl;
358 ShenandoahConcurrentNMethodIterator _iterator;
359
360 public:
361 ShenandoahNMethodPurgeTask() :
362 AbstractGangTask("ShenandoahNMethodPurgeTask"),
|
~ShenandoahDisarmNMethodsTask() {
  // Finish the concurrent nmethod iteration started in the constructor.
  // The iterator bookkeeping is guarded by CodeCache_lock; taken without a
  // safepoint check since this runs on a GC thread.
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  _iterator.nmethods_do_end();
}
207
virtual void work(uint worker_id) {
  // Establish the per-worker session first, then pull nmethods from the
  // shared concurrent iterator and apply the disarm closure to each.
  ShenandoahParallelWorkerSession worker_session(worker_id);
  _iterator.nmethods_do(&_cl);
}
212 };
213
214 void ShenandoahCodeRoots::disarm_nmethods() {
215 ShenandoahDisarmNMethodsTask task;
216 ShenandoahHeap::heap()->workers()->run_task(&task);
217 }
218
// Applied to every registered nmethod during the concurrent unlink phase.
// Unloading nmethods are unlinked from their Method; live nmethods get their
// oops healed, their entry barrier disarmed, and their IC/exception caches
// cleared. Two distinct failure modes are latched in _failed:
//  - the per-nmethod abortable lock could not be taken (locker.aborted()),
//    in which case the pass is simply retried, and
//  - the transitional IC stub buffer ran out (_refill_icBuffer also set),
//    in which case the caller must refill the buffer before retrying.
// See ShenandoahCodeRoots::unlink below for the retry loop.
class ShenandoahNMethodUnlinkClosure : public NMethodClosure {
private:
  bool _unloading_occurred;       // forwarded to unload_nmethod_caches()
  volatile bool _failed;          // latched failure flag, shared across workers (Atomic access)
  bool _refill_icBuffer;          // set only when failure was due to IC stub exhaustion
  ShenandoahHeap* const _heap;
  BarrierSetNMethod* const _bs;

  // Latch failure; other workers poll failed() and bail out early.
  void set_failed() {
    Atomic::store(&_failed, true);
  }

  // Detach an unloading nmethod from its Method and dependency context.
  void unlink(nmethod* nm) {
    // Unlinking of the dependencies must happen before the
    // handshake separating unlink and purge.
    nm->flush_dependencies(false /* delete_immediately */);

    // unlink_from_method will take the CompiledMethod_lock.
    // In this case we don't strictly need it when unlinking nmethods from
    // the Method, because it is only concurrently unlinked by
    // the entry barrier, which acquires the per nmethod lock.
    nm->unlink_from_method();

    if (nm->is_osr_method()) {
      // Invalidate the osr nmethod only once
      nm->invalidate_osr_method();
    }
  }
public:
  ShenandoahNMethodUnlinkClosure(bool unloading_occurred) :
      _unloading_occurred(unloading_occurred),
      _failed(false),
      _refill_icBuffer(false),
      _heap(ShenandoahHeap::heap()),
      _bs(ShenandoahBarrierSet::barrier_set()->barrier_set_nmethod()) {}

  virtual void do_nmethod(nmethod* nm) {
    assert(_heap->is_concurrent_weak_root_in_progress(), "Only this phase");
    // Another worker already failed; no point doing more work this pass.
    if (failed()) {
      return;
    }

    ShenandoahNMethod* nm_data = ShenandoahNMethod::gc_data(nm);
    assert(!nm_data->is_unregistered(), "Should not see unregistered entry");

    if (!nm->is_alive()) {
      return;
    }

    if (nm->is_unloading()) {
      // Abortable lock: if it cannot be taken, fail the pass and retry
      // later instead of blocking here.
      ShenandoahAbortableNMethodLocker locker(nm_data->lock());
      if (locker.aborted()) {
        set_failed();
      } else {
        unlink(nm);
      }
      return;
    }

    ShenandoahAbortableNMethodLocker locker(nm_data->lock());
    if (locker.aborted()) {
      set_failed();
      return;
    }

    // Heal oops and disarm
    if (_bs->is_armed(nm)) {
      ShenandoahNMethod::heal_nmethod_metadata(nm_data);
      _bs->disarm(nm);
    }

    // Clear compiled ICs and exception caches. A false return indicates we
    // ran out of transitional IC stubs; record that a refill is needed
    // before the retry can make progress.
    if (!nm->unload_nmethod_caches(_unloading_occurred)) {
      _refill_icBuffer = true;
      set_failed();
    }
  }

  // True when the last failure requires refilling the IC stub buffer.
  bool need_refill_icBuffer() const {
    return _refill_icBuffer;
  }

  bool failed() const {
    return Atomic::load(&_failed);
  }
};
305
// Gang task that drives ShenandoahNMethodUnlinkClosure over all registered
// nmethods via the shared concurrent iterator. The iterator's begin/end
// bookkeeping is done under CodeCache_lock in the constructor/destructor, so
// one task object brackets exactly one iteration pass.
class ShenandoahUnlinkTask : public AbstractGangTask {
private:
  ShenandoahNMethodUnlinkClosure _cl;
  ICRefillVerifier* _verifier;    // shared verifier; each worker registers via ICRefillVerifierMark
  ShenandoahConcurrentNMethodIterator _iterator;

public:
  ShenandoahUnlinkTask(bool unloading_occurred, ICRefillVerifier* verifier) :
    AbstractGangTask("ShenandoahNMethodUnlinkTask"),
    _cl(unloading_occurred),
    _verifier(verifier),
    _iterator(ShenandoahCodeRoots::table()) {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_begin();
  }

  ~ShenandoahUnlinkTask() {
    // Close the iteration under the same lock that opened it.
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_end();
  }

  virtual void work(uint worker_id) {
    ICRefillVerifierMark mark(_verifier);
    // Evac-OOM scope: healing nmethod oops may evacuate objects — presumably
    // required by heal_nmethod_metadata; confirm against its implementation.
    ShenandoahEvacOOMScope evac_scope;
    _iterator.nmethods_do(&_cl);
  }

  // True when no worker latched a failure during the pass.
  bool success() const {
    return !_cl.failed();
  }

  // True when the failure was due to IC stub exhaustion, so the caller must
  // refill the IC buffer before retrying.
  bool need_refill_icBuffer() const {
    return _cl.need_refill_icBuffer();
  }
};
341
// Concurrently unlink all unloading nmethods. Retries the whole pass until it
// succeeds. Only failures caused by IC stub exhaustion trigger a buffer
// refill; a failure caused by an aborted per-nmethod lock retries
// immediately without the (safepoint-requiring) refill.
void ShenandoahCodeRoots::unlink(WorkGang* workers, bool unloading_occurred) {
  assert(ShenandoahConcurrentRoots::should_do_concurrent_class_unloading(),
         "Only when running concurrent class unloading");

  for (;;) {
    ICRefillVerifier verifier;
    bool need_refill_icBuffer = false;

    // Inner scope: the task destructor must run (closing the concurrent
    // iterator under CodeCache_lock) before we refill IC stubs below.
    {
      ShenandoahUnlinkTask task(unloading_occurred, &verifier);
      workers->run_task(&task);
      if (task.success()) {
        return;
      }
      // Capture the reason before the task is destroyed.
      need_refill_icBuffer = task.need_refill_icBuffer();
    }

    if (need_refill_icBuffer) {
      // Cleaning failed because we ran out of transitional IC stubs,
      // so we have to refill and try again. Refilling requires taking
      // a safepoint, so we temporarily leave the suspendible thread set.
      SuspendibleThreadSetLeaver sts;
      InlineCacheBuffer::refill_ic_stubs();
    }
  }
}
368
369 class ShenandoahNMethodPurgeClosure : public NMethodClosure {
370 public:
371 virtual void do_nmethod(nmethod* nm) {
372 if (nm->is_alive() && nm->is_unloading()) {
373 nm->make_unloaded();
374 }
375 }
376 };
377
378 class ShenandoahNMethodPurgeTask : public AbstractGangTask {
379 private:
380 ShenandoahNMethodPurgeClosure _cl;
381 ShenandoahConcurrentNMethodIterator _iterator;
382
383 public:
384 ShenandoahNMethodPurgeTask() :
385 AbstractGangTask("ShenandoahNMethodPurgeTask"),
|