/*
 * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/relocInfo.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/z/zArray.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHash.inline.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zNMethodTable.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zWorkers.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"

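// Holds the addresses of an nmethod's immediate oops. The oop* array is
// co-allocated with this object and starts directly after the header, so
// the whole structure lives in a single C-heap allocation.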
class ZNMethodDataImmediateOops {
private:
  const size_t _nimmediate_oops;

  static size_t header_size();

  ZNMethodDataImmediateOops(const GrowableArray<oop*>& immediate_oops);

public:
  static ZNMethodDataImmediateOops* create(const GrowableArray<oop*>& immediate_oops);
  static void destroy(ZNMethodDataImmediateOops* data_immediate_oops);

  size_t immediate_oops_count() const;
  oop** immediate_oops_begin() const;
  oop** immediate_oops_end() const;
};

size_t ZNMethodDataImmediateOops::header_size() {
  const size_t size = sizeof(ZNMethodDataImmediateOops);
  assert(is_aligned(size, sizeof(oop*)), "Header misaligned");
  return size;
}

ZNMethodDataImmediateOops* ZNMethodDataImmediateOops::create(const GrowableArray<oop*>& immediate_oops) {
  // Allocate memory for the ZNMethodDataImmediateOops object
  // plus the immediate oop* array that follows right after.
  const size_t size = ZNMethodDataImmediateOops::header_size() + (sizeof(oop*) * immediate_oops.length());
  void* const data_immediate_oops = NEW_C_HEAP_ARRAY(uint8_t, size, mtGC);
  return ::new (data_immediate_oops) ZNMethodDataImmediateOops(immediate_oops);
}

void ZNMethodDataImmediateOops::destroy(ZNMethodDataImmediateOops* data_immediate_oops) {
  ZNMethodTable::safe_delete(data_immediate_oops);
}

ZNMethodDataImmediateOops::ZNMethodDataImmediateOops(const GrowableArray<oop*>& immediate_oops) :
    _nimmediate_oops(immediate_oops.length()) {
  // Save all immediate oops
  for (size_t i = 0; i < _nimmediate_oops; i++) {
    immediate_oops_begin()[i] = immediate_oops.at(i);
  }
}

size_t ZNMethodDataImmediateOops::immediate_oops_count() const {
  return _nimmediate_oops;
}

oop** ZNMethodDataImmediateOops::immediate_oops_begin() const {
  // The immediate oop* array starts immediately after this object
  return (oop**)((uintptr_t)this + header_size());
}

oop** ZNMethodDataImmediateOops::immediate_oops_end() const {
  return immediate_oops_begin() + immediate_oops_count();
}

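// Per-nmethod GC data, attached to each registered nmethod. Holds the
// per-nmethod lock and the list of immediate oops, which is swapped
// atomically when the nmethod is (re-)registered.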
class ZNMethodData {
private:
  ZReentrantLock                      _lock;
  ZNMethodDataImmediateOops* volatile _immediate_oops;

  ZNMethodData(nmethod* nm);

public:
  static ZNMethodData* create(nmethod* nm);
  static void destroy(ZNMethodData* data);

  ZReentrantLock* lock();

  ZNMethodDataImmediateOops* immediate_oops() const;
  ZNMethodDataImmediateOops* swap_immediate_oops(const GrowableArray<oop*>& immediate_oops);
};

ZNMethodData* ZNMethodData::create(nmethod* nm) {
  void* const method = NEW_C_HEAP_ARRAY(uint8_t, sizeof(ZNMethodData), mtGC);
  return ::new (method) ZNMethodData(nm);
}

void ZNMethodData::destroy(ZNMethodData* data) {
  ZNMethodDataImmediateOops::destroy(data->immediate_oops());
  ZNMethodTable::safe_delete(data);
}

ZNMethodData::ZNMethodData(nmethod* nm) :
    _lock(),
    _immediate_oops(NULL) {}

ZReentrantLock* ZNMethodData::lock() {
  return &_lock;
}

ZNMethodDataImmediateOops* ZNMethodData::immediate_oops() const {
  return OrderAccess::load_acquire(&_immediate_oops);
}

ZNMethodDataImmediateOops* ZNMethodData::swap_immediate_oops(const GrowableArray<oop*>& immediate_oops) {
  ZNMethodDataImmediateOops* const data_immediate_oops =
    immediate_oops.is_empty() ? NULL : ZNMethodDataImmediateOops::create(immediate_oops);
  return Atomic::xchg(data_immediate_oops, &_immediate_oops);
}

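// Accessors for the ZNMethodData attached to an nmethod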
static ZNMethodData* gc_data(const nmethod* nm) {
  return nm->gc_data<ZNMethodData>();
}

static void set_gc_data(nmethod* nm, ZNMethodData* data) {
  return nm->set_gc_data<ZNMethodData>(data);
}

ZNMethodTableEntry* ZNMethodTable::_table = NULL;
size_t ZNMethodTable::_size = 0;
ZLock ZNMethodTable::_iter_lock;
ZNMethodTableEntry* ZNMethodTable::_iter_table = NULL;
size_t ZNMethodTable::_iter_table_size = 0;
ZArray<void*> ZNMethodTable::_iter_deferred_deletes;
size_t ZNMethodTable::_nregistered = 0;
size_t ZNMethodTable::_nunregistered = 0;
volatile size_t ZNMethodTable::_claimed = 0;

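// Deletes data associated with an nmethod, unless a table iteration is
// in progress, in which case the delete is deferred until the iteration
// has completed. This allows iterating threads to access nmethod data
// without holding a lock.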
void ZNMethodTable::safe_delete(void* data) {
  if (data == NULL) {
    return;
  }

  ZLocker<ZLock> locker(&_iter_lock);
  if (_iter_table != NULL) {
    // Iteration in progress, defer delete
    _iter_deferred_deletes.add(data);
  } else {
    // Iteration not in progress, delete now
    FREE_C_HEAP_ARRAY(uint8_t, data);
  }
}

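// Scans the nmethod's relocations, collecting the address of every
// non-NULL immediate oop and noting whether any non-immediate oops
// exist, and attaches the result to the nmethod as GC data.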
ZNMethodTableEntry ZNMethodTable::create_entry(nmethod* nm) {
  GrowableArray<oop*> immediate_oops;
  bool non_immediate_oops = false;

  // Find all oop relocations
  RelocIterator iter(nm);
  while (iter.next()) {
    if (iter.type() != relocInfo::oop_type) {
      // Not an oop
      continue;
    }

    oop_Relocation* r = iter.oop_reloc();

    if (!r->oop_is_immediate()) {
      // Non-immediate oop found
      non_immediate_oops = true;
      continue;
    }

    if (r->oop_value() != NULL) {
      // Non-NULL immediate oop found. NULL oops can safely be
      // ignored since the method will be re-registered if they
      // are later patched to be non-NULL.
      immediate_oops.push(r->oop_addr());
    }
  }

  // Attach GC data to nmethod
  ZNMethodData* data = gc_data(nm);
  if (data == NULL) {
    data = ZNMethodData::create(nm);
    set_gc_data(nm, data);
  }

  // Attach immediate oops to GC data
  ZNMethodDataImmediateOops* const old_data_immediate_oops = data->swap_immediate_oops(immediate_oops);
  ZNMethodDataImmediateOops::destroy(old_data_immediate_oops);

  // Create entry
  return ZNMethodTableEntry(nm, non_immediate_oops, !immediate_oops.is_empty());
}

ZReentrantLock* ZNMethodTable::lock_for_nmethod(nmethod* nm) {
  ZNMethodData* const data = gc_data(nm);
  if (data == NULL) {
    return NULL;
  }
  return data->lock();
}

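// The table uses open addressing with linear probing. Since the table
// size is always a power of two, the hash can be masked instead of
// reduced with a modulo operation.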
size_t ZNMethodTable::first_index(const nmethod* nm, size_t size) {
  assert(is_power_of_2(size), "Invalid size");
  const size_t mask = size - 1;
  const size_t hash = ZHash::address_to_uint32((uintptr_t)nm);
  return hash & mask;
}

size_t ZNMethodTable::next_index(size_t prev_index, size_t size) {
  assert(is_power_of_2(size), "Invalid size");
  const size_t mask = size - 1;
  return (prev_index + 1) & mask;
}

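// Inserts an entry, or replaces the existing entry for the same nmethod.
// Returns true if a new entry was inserted, and false if an existing
// entry was replaced.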
bool ZNMethodTable::register_entry(ZNMethodTableEntry* table, size_t size, ZNMethodTableEntry entry) {
  const nmethod* const nm = entry.method();
  size_t index = first_index(nm, size);

  for (;;) {
    const ZNMethodTableEntry table_entry = table[index];

    if (!table_entry.registered() && !table_entry.unregistered()) {
      // Insert new entry
      table[index] = entry;
      return true;
    }

    if (table_entry.registered() && table_entry.method() == nm) {
      // Replace existing entry
      table[index] = entry;
      return false;
    }

    index = next_index(index, size);
  }
}

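// Removes the entry for the given nmethod, if present. The slot is marked
// as unregistered rather than cleared, so that it acts as a tombstone and
// probing for other entries can continue past it. Tombstones are pruned
// the next time the table is rebuilt.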
bool ZNMethodTable::unregister_entry(ZNMethodTableEntry* table, size_t size, nmethod* nm) {
  if (size == 0) {
    // Table is empty
    return false;
  }

  size_t index = first_index(nm, size);

  for (;;) {
    const ZNMethodTableEntry table_entry = table[index];

    if (!table_entry.registered() && !table_entry.unregistered()) {
      // Entry not found
      return false;
    }

    if (table_entry.registered() && table_entry.method() == nm) {
      // Remove entry
      table[index] = ZNMethodTableEntry(true /* unregistered */);

      // Destroy GC data
      ZNMethodData::destroy(gc_data(nm));
      set_gc_data(nm, NULL);
      return true;
    }

    index = next_index(index, size);
  }
}

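// Rebuilds the table at the given size, transferring all registered
// entries and dropping all unregistered (tombstone) entries. The old
// table is only deleted here if no iteration is using it; otherwise it
// is deleted when the iteration completes.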
void ZNMethodTable::rebuild(size_t new_size) {
  ZLocker<ZLock> locker(&_iter_lock);
  assert(is_power_of_2(new_size), "Invalid size");

  log_debug(gc, nmethod)("Rebuilding NMethod Table: "
                         SIZE_FORMAT "->" SIZE_FORMAT " entries, "
                         SIZE_FORMAT "(%.0lf%%->%.0lf%%) registered, "
                         SIZE_FORMAT "(%.0lf%%->%.0lf%%) unregistered",
                         _size, new_size,
                         _nregistered, percent_of(_nregistered, _size), percent_of(_nregistered, new_size),
                         _nunregistered, percent_of(_nunregistered, _size), 0.0);

  // Allocate new table
  ZNMethodTableEntry* const new_table = new ZNMethodTableEntry[new_size];

  // Transfer all registered entries
  for (size_t i = 0; i < _size; i++) {
    const ZNMethodTableEntry entry = _table[i];
    if (entry.registered()) {
      register_entry(new_table, new_size, entry);
    }
  }

  if (_iter_table != _table) {
    // Delete old table
    delete [] _table;
  }

  // Install new table
  _table = new_table;
  _size = new_size;
  _nunregistered = 0;
}

void ZNMethodTable::rebuild_if_needed() {
  // The hash table uses linear probing. To avoid wasting memory while
  // at the same time maintaining good hash collision behavior we want
  // to keep the table occupancy between 30% and 70%. The table always
  // grows/shrinks by doubling/halving its size. Pruning of unregistered
  // entries is done by rebuilding the table with or without resizing it.
  const size_t min_size = 1024;
  const size_t shrink_threshold = _size * 0.30;
  const size_t prune_threshold = _size * 0.65;
  const size_t grow_threshold = _size * 0.70;

  if (_size == 0) {
    // Initialize table
    rebuild(min_size);
  } else if (_nregistered < shrink_threshold && _size > min_size) {
    // Shrink table
    rebuild(_size / 2);
  } else if (_nregistered + _nunregistered > grow_threshold) {
    // Prune or grow table
    if (_nregistered < prune_threshold) {
      // Prune table
      rebuild(_size);
    } else {
      // Grow table
      rebuild(_size * 2);
    }
  }
}

void ZNMethodTable::log_register(const nmethod* nm, ZNMethodTableEntry entry) {
  LogTarget(Trace, gc, nmethod) log;
  if (!log.is_enabled()) {
    return;
  }

  log.print("Register NMethod: %s.%s (" PTR_FORMAT "), "
            "Compiler: %s, Oops: %d, ImmediateOops: " SIZE_FORMAT ", NonImmediateOops: %s",
            nm->method()->method_holder()->external_name(),
            nm->method()->name()->as_C_string(),
            p2i(nm),
            nm->compiler_name(),
            nm->oops_count() - 1,
            entry.immediate_oops() ? gc_data(nm)->immediate_oops()->immediate_oops_count() : 0,
            entry.non_immediate_oops() ? "Yes" : "No");

  LogTarget(Trace, gc, nmethod, oops) log_oops;
  if (!log_oops.is_enabled()) {
    return;
  }

  // Print nmethod oops table
  oop* const begin = nm->oops_begin();
  oop* const end = nm->oops_end();
  for (oop* p = begin; p < end; p++) {
    log_oops.print("           Oop[" SIZE_FORMAT "] " PTR_FORMAT " (%s)",
                   (p - begin), p2i(*p), (*p)->klass()->external_name());
  }

  if (entry.immediate_oops()) {
    // Print nmethod immediate oops
    const ZNMethodDataImmediateOops* const nmi = gc_data(nm)->immediate_oops();
    if (nmi != NULL) {
      oop** const begin = nmi->immediate_oops_begin();
      oop** const end = nmi->immediate_oops_end();
      for (oop** p = begin; p < end; p++) {
        log_oops.print("  ImmediateOop[" SIZE_FORMAT "] " PTR_FORMAT " @ " PTR_FORMAT " (%s)",
                       (p - begin), p2i(**p), p2i(*p), (**p)->klass()->external_name());
      }
    }
  }
}

void ZNMethodTable::log_unregister(const nmethod* nm) {
  LogTarget(Debug, gc, nmethod) log;
  if (!log.is_enabled()) {
    return;
  }

  log.print("Unregister NMethod: %s.%s (" PTR_FORMAT ")",
            nm->method()->method_holder()->external_name(),
            nm->method()->name()->as_C_string(),
            p2i(nm));
}

size_t ZNMethodTable::registered_nmethods() {
  return _nregistered;
}

size_t ZNMethodTable::unregistered_nmethods() {
  return _nunregistered;
}

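// Registers an nmethod with the table. Also called to re-register an
// nmethod after its oops have been patched, in which case the existing
// entry is replaced.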
void ZNMethodTable::register_nmethod(nmethod* nm) {
  assert(CodeCache_lock->owned_by_self(), "Lock must be held");
  ResourceMark rm;

  // Grow/Shrink/Prune table if needed
  rebuild_if_needed();

  // Create entry
  const ZNMethodTableEntry entry = create_entry(nm);

  log_register(nm, entry);

  // Insert new entry
  if (register_entry(_table, _size, entry)) {
    // New entry registered. When register_entry() instead returns
    // false the nmethod was already in the table, so we do not want
    // to increase the number of registered entries in that case.
    _nregistered++;
  }

  // Disarm nmethod entry barrier
  disarm_nmethod(nm);
}

void ZNMethodTable::sweeper_wait_for_iteration() {
  // The sweeper must wait for any ongoing iteration to complete
  // before it can unregister an nmethod.
  if (!Thread::current()->is_Code_cache_sweeper_thread()) {
    return;
  }

  assert(CodeCache_lock->owned_by_self(), "Lock must be held");

  while (_iter_table != NULL) {
    MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    os::naked_short_sleep(1);
  }
}

void ZNMethodTable::unregister_nmethod(nmethod* nm) {
  ResourceMark rm;

  sweeper_wait_for_iteration();

  log_unregister(nm);

  // Remove entry
  if (unregister_entry(_table, _size, nm)) {
    // Entry was unregistered. When unregister_entry() instead returns
    // false the nmethod was not in the table (because it didn't have
    // any oops) so we do not want to decrease the number of registered
    // entries in that case.
    _nregistered--;
    _nunregistered++;
  }
}

void ZNMethodTable::disarm_nmethod(nmethod* nm) {
  BarrierSetNMethod* const bs = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (bs != NULL) {
    bs->disarm(nm);
  }
}

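// Iteration operates on a snapshot of the table. nmethod_entries_do_begin()
// publishes the current table as the iteration table, and
// nmethod_entries_do_end() retires the snapshot, deletes any table that was
// replaced by a concurrent rebuild, and processes deferred deletes.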
void ZNMethodTable::nmethod_entries_do_begin() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  ZLocker<ZLock> locker(&_iter_lock);

  // Prepare iteration
  _iter_table = _table;
  _iter_table_size = _size;
  _claimed = 0;
  assert(_iter_deferred_deletes.is_empty(), "Should be empty");
}

void ZNMethodTable::nmethod_entries_do_end() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  ZLocker<ZLock> locker(&_iter_lock);

  // Finish iteration
  if (_iter_table != _table) {
    delete [] _iter_table;
  }
  _iter_table = NULL;
  assert(_claimed >= _iter_table_size, "Failed to claim all table entries");

  // Process deferred deletes
  ZArrayIterator<void*> iter(&_iter_deferred_deletes);
  for (void* data; iter.next(&data);) {
    FREE_C_HEAP_ARRAY(uint8_t, data);
  }
  _iter_deferred_deletes.clear();
}

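// Applies the closure to all oops in the nmethod's oops table and to all
// of its immediate oops. Slots holding the non-oop sentinel word are
// skipped.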
void ZNMethodTable::entry_oops_do(ZNMethodTableEntry entry, OopClosure* cl) {
  nmethod* const nm = entry.method();

  // Process oops table
  oop* const begin = nm->oops_begin();
  oop* const end = nm->oops_end();
  for (oop* p = begin; p < end; p++) {
    if (*p != Universe::non_oop_word()) {
      cl->do_oop(p);
    }
  }

  // Process immediate oops
  if (entry.immediate_oops()) {
    const ZNMethodDataImmediateOops* const nmi = gc_data(nm)->immediate_oops();
    if (nmi != NULL) {
      oop** const begin = nmi->immediate_oops_begin();
      oop** const end = nmi->immediate_oops_end();
      for (oop** p = begin; p < end; p++) {
        if (**p != Universe::non_oop_word()) {
          cl->do_oop(*p);
        }
      }
    }
  }

  // Process non-immediate oops
  if (entry.non_immediate_oops()) {
    nm->fix_oop_relocations();
  }
}

class ZNMethodTableEntryToOopsDo : public ZNMethodTableEntryClosure {
private:
  OopClosure* _cl;

public:
  ZNMethodTableEntryToOopsDo(OopClosure* cl) :
      _cl(cl) {}

  virtual void do_nmethod_entry(ZNMethodTableEntry entry) {
    ZNMethodTable::entry_oops_do(entry, _cl);
  }
};

void ZNMethodTable::oops_do(OopClosure* cl) {
  ZNMethodTableEntryToOopsDo entry_cl(cl);
  nmethod_entries_do(&entry_cl);
}

void ZNMethodTable::nmethod_entries_do(ZNMethodTableEntryClosure* cl) {
  for (;;) {
    // Claim table partition. Each partition is currently sized to span
    // two cache lines. This number is just a guess, but seems to work well.
    const size_t partition_size = (ZCacheLineSize * 2) / sizeof(ZNMethodTableEntry);
    const size_t partition_start = MIN2(Atomic::add(partition_size, &_claimed) - partition_size, _iter_table_size);
    const size_t partition_end = MIN2(partition_start + partition_size, _iter_table_size);
    if (partition_start == partition_end) {
      // End of table
      break;
    }

    // Process table partition
    for (size_t i = partition_start; i < partition_end; i++) {
      const ZNMethodTableEntry entry = _iter_table[i];
      if (entry.registered()) {
        cl->do_nmethod_entry(entry);
      }
    }
  }
}

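// First pass of nmethod unloading. Unloading nmethods have their
// dependencies flushed and are unlinked from their Method. All other
// alive nmethods have their oops healed, their entry barrier disarmed,
// and their ICs and caches cleaned. Sets the failed flag if IC cleaning
// runs out of transitional IC stubs.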
class ZNMethodTableUnlinkClosure : public ZNMethodTableEntryClosure {
private:
  bool          _unloading_occurred;
  volatile bool _failed;

  void set_failed() {
    Atomic::store(true, &_failed);
  }

public:
  ZNMethodTableUnlinkClosure(bool unloading_occurred) :
      _unloading_occurred(unloading_occurred),
      _failed(false) {}

  virtual void do_nmethod_entry(ZNMethodTableEntry entry) {
    if (failed()) {
      return;
    }

    nmethod* const nm = entry.method();
    if (!nm->is_alive()) {
      return;
    }

    ZLocker<ZReentrantLock> locker(ZNMethodTable::lock_for_nmethod(nm));

    if (nm->is_unloading()) {
      // Unlinking of the dependencies must happen before the
      // handshake separating unlink and purge.
      nm->flush_dependencies(false /* delete_immediately */);

      // We don't need to take the lock when unlinking nmethods from
      // the Method, because it is only concurrently unlinked by
      // the entry barrier, which acquires the per-nmethod lock.
      nm->unlink_from_method(false /* acquire_lock */);
      return;
    }

    // Heal oops and disarm
    ZNMethodOopClosure cl;
    ZNMethodTable::entry_oops_do(entry, &cl);
    ZNMethodTable::disarm_nmethod(nm);

    // Clear compiled ICs and exception caches
    if (!nm->unload_nmethod_caches(_unloading_occurred)) {
      set_failed();
    }
  }

  bool failed() const {
    return Atomic::load(&_failed);
  }
};

class ZNMethodTableUnlinkTask : public ZTask {
private:
  ZNMethodTableUnlinkClosure _cl;
  ICRefillVerifier*          _verifier;

public:
  ZNMethodTableUnlinkTask(bool unloading_occurred, ICRefillVerifier* verifier) :
      ZTask("ZNMethodTableUnlinkTask"),
      _cl(unloading_occurred),
      _verifier(verifier) {
    ZNMethodTable::nmethod_entries_do_begin();
  }

  ~ZNMethodTableUnlinkTask() {
    ZNMethodTable::nmethod_entries_do_end();
  }

  virtual void work() {
    ICRefillVerifierMark mark(_verifier);
    ZNMethodTable::nmethod_entries_do(&_cl);
  }

  bool success() const {
    return !_cl.failed();
  }
};

void ZNMethodTable::unlink(ZWorkers* workers, bool unloading_occurred) {
  for (;;) {
    ICRefillVerifier verifier;

    {
      ZNMethodTableUnlinkTask task(unloading_occurred, &verifier);
      workers->run_concurrent(&task);
      if (task.success()) {
        return;
      }
    }

    // Cleaning failed because we ran out of transitional IC stubs,
    // so we have to refill and try again. Refilling requires taking
    // a safepoint, so we temporarily leave the suspendible thread set.
    SuspendibleThreadSetLeaver sts;
    InlineCacheBuffer::refill_ic_stubs();
  }
}

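// Second pass of nmethod unloading, running after the handshake that
// separates unlink and purge. Makes all unloading nmethods unloaded.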
class ZNMethodTablePurgeClosure : public ZNMethodTableEntryClosure {
public:
  virtual void do_nmethod_entry(ZNMethodTableEntry entry) {
    nmethod* const nm = entry.method();
    if (nm->is_alive() && nm->is_unloading()) {
      nm->make_unloaded();
    }
  }
};

class ZNMethodTablePurgeTask : public ZTask {
private:
  ZNMethodTablePurgeClosure _cl;

public:
  ZNMethodTablePurgeTask() :
      ZTask("ZNMethodTablePurgeTask"),
      _cl() {
    ZNMethodTable::nmethod_entries_do_begin();
  }

  ~ZNMethodTablePurgeTask() {
    ZNMethodTable::nmethod_entries_do_end();
  }

  virtual void work() {
    ZNMethodTable::nmethod_entries_do(&_cl);
  }
};

void ZNMethodTable::purge(ZWorkers* workers) {
  ZNMethodTablePurgeTask task;
  workers->run_concurrent(&task);
}