/*
 * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "code/relocInfo.hpp"
#include "code/nmethod.hpp"
#include "code/icBuffer.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHash.inline.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zNMethodAllocator.hpp"
#include "gc/z/zNMethodTable.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zWorkers.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "utilities/debug.hpp"

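// ZNMethodDataOops is a variable-sized object: the fixed-size header below is
// immediately followed in memory by an array of oop* entries, one for each
// immediate oop location found in the nmethod's code stream. The combined
// allocation is made in create() and laid out via header_size().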
class ZNMethodDataOops {
private:
  const size_t _nimmediates;
  bool         _has_non_immediates;

  static size_t header_size();

  ZNMethodDataOops(const GrowableArray<oop*>& immediates, bool has_non_immediates);

public:
  static ZNMethodDataOops* create(const GrowableArray<oop*>& immediates, bool has_non_immediates);
  static void destroy(ZNMethodDataOops* oops);

  size_t immediates_count() const;
  oop**  immediates_begin() const;
  oop**  immediates_end()   const;

  bool   has_non_immediates() const;
};

size_t ZNMethodDataOops::header_size() {
  const size_t size = sizeof(ZNMethodDataOops);
  assert(is_aligned(size, sizeof(oop*)), "Header misaligned");
  return size;
}

ZNMethodDataOops* ZNMethodDataOops::create(const GrowableArray<oop*>& immediates, bool has_non_immediates) {
  // Allocate memory for the ZNMethodDataOops object
  // plus the immediate oop* array that follows right after.
  const size_t size = ZNMethodDataOops::header_size() + (sizeof(oop*) * immediates.length());
  void* const mem = ZNMethodAllocator::allocate(size);
  return ::new (mem) ZNMethodDataOops(immediates, has_non_immediates);
}

void ZNMethodDataOops::destroy(ZNMethodDataOops* oops) {
  ZNMethodAllocator::free(oops);
}

ZNMethodDataOops::ZNMethodDataOops(const GrowableArray<oop*>& immediates, bool has_non_immediates) :
    _nimmediates(immediates.length()),
    _has_non_immediates(has_non_immediates) {
  // Save all immediate oops
  for (size_t i = 0; i < _nimmediates; i++) {
    immediates_begin()[i] = immediates.at(i);
  }
}

size_t ZNMethodDataOops::immediates_count() const {
  return _nimmediates;
}

oop** ZNMethodDataOops::immediates_begin() const {
  // The immediate oop* array starts immediately after this object
  return (oop**)((uintptr_t)this + header_size());
}

oop** ZNMethodDataOops::immediates_end() const {
  return immediates_begin() + immediates_count();
}

bool ZNMethodDataOops::has_non_immediates() const {
  return _has_non_immediates;
}

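// ZNMethodData is the per-nmethod GC data attached to each registered
// nmethod. It holds the per-nmethod lock used to synchronize with the
// nmethod entry barrier, and a pointer to the current ZNMethodDataOops,
// which is swapped atomically when the nmethod is re-registered.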
class ZNMethodData {
private:
  ZReentrantLock             _lock;
  ZNMethodDataOops* volatile _oops;

  ZNMethodData(nmethod* nm);

public:
  static ZNMethodData* create(nmethod* nm);
  static void destroy(ZNMethodData* data);

  ZReentrantLock* lock();

  ZNMethodDataOops* oops() const;
  ZNMethodDataOops* swap_oops(ZNMethodDataOops* oops);
};

ZNMethodData* ZNMethodData::create(nmethod* nm) {
  void* const mem = ZNMethodAllocator::allocate(sizeof(ZNMethodData));
  return ::new (mem) ZNMethodData(nm);
}

void ZNMethodData::destroy(ZNMethodData* data) {
  ZNMethodAllocator::free(data->oops());
  ZNMethodAllocator::free(data);
}

ZNMethodData::ZNMethodData(nmethod* nm) :
    _lock(),
    _oops(NULL) {}

ZReentrantLock* ZNMethodData::lock() {
  return &_lock;
}

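// Loaded with acquire semantics so that a reader observing a new
// ZNMethodDataOops pointer also observes its fully initialized contents,
// as published by the full-fence xchg in swap_oops().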
ZNMethodDataOops* ZNMethodData::oops() const {
  return OrderAccess::load_acquire(&_oops);
}

ZNMethodDataOops* ZNMethodData::swap_oops(ZNMethodDataOops* new_oops) {
  return Atomic::xchg(new_oops, &_oops);
}

static ZNMethodData* gc_data(const nmethod* nm) {
  return nm->gc_data<ZNMethodData>();
}

static void set_gc_data(nmethod* nm, ZNMethodData* data) {
  return nm->set_gc_data<ZNMethodData>(data);
}

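// _table is the currently active hash table, protected by the CodeCache_lock.
// _iter_table is a snapshot of the table installed by nmethods_do_begin() so
// that concurrent GC workers can iterate while the active table is rebuilt;
// if the table is rebuilt during iteration, the snapshot is instead deleted
// later, in nmethods_do_end().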
ZNMethodTableEntry* ZNMethodTable::_table = NULL;
size_t ZNMethodTable::_size = 0;
ZNMethodTableEntry* ZNMethodTable::_iter_table = NULL;
size_t ZNMethodTable::_iter_table_size = 0;
size_t ZNMethodTable::_nregistered = 0;
size_t ZNMethodTable::_nunregistered = 0;
volatile size_t ZNMethodTable::_claimed = 0;

void ZNMethodTable::attach_gc_data(nmethod* nm) {
  GrowableArray<oop*> immediate_oops;
  bool non_immediate_oops = false;

  // Find all oop relocations
  RelocIterator iter(nm);
  while (iter.next()) {
    if (iter.type() != relocInfo::oop_type) {
      // Not an oop
      continue;
    }

    oop_Relocation* r = iter.oop_reloc();

    if (!r->oop_is_immediate()) {
      // Non-immediate oop found
      non_immediate_oops = true;
      continue;
    }

    if (r->oop_value() != NULL) {
      // Non-NULL immediate oop found. NULL oops can safely be
      // ignored since the method will be re-registered if they
      // are later patched to be non-NULL.
      immediate_oops.push(r->oop_addr());
    }
  }

  // Attach GC data to nmethod
  ZNMethodData* data = gc_data(nm);
  if (data == NULL) {
    data = ZNMethodData::create(nm);
    set_gc_data(nm, data);
  }

  // Attach oops in GC data
  ZNMethodDataOops* const new_oops = ZNMethodDataOops::create(immediate_oops, non_immediate_oops);
  ZNMethodDataOops* const old_oops = data->swap_oops(new_oops);
  ZNMethodDataOops::destroy(old_oops);
}

void ZNMethodTable::detach_gc_data(nmethod* nm) {
  // Destroy GC data
  ZNMethodData::destroy(gc_data(nm));
  set_gc_data(nm, NULL);
}

ZReentrantLock* ZNMethodTable::lock_for_nmethod(nmethod* nm) {
  ZNMethodData* const data = gc_data(nm);
  if (data == NULL) {
    return NULL;
  }
  return data->lock();
}

size_t ZNMethodTable::first_index(const nmethod* nm, size_t size) {
  assert(is_power_of_2(size), "Invalid size");
  const size_t mask = size - 1;
  const size_t hash = ZHash::address_to_uint32((uintptr_t)nm);
  return hash & mask;
}

size_t ZNMethodTable::next_index(size_t prev_index, size_t size) {
  assert(is_power_of_2(size), "Invalid size");
  const size_t mask = size - 1;
  return (prev_index + 1) & mask;
}

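// The table uses open addressing with linear probing. An entry is in one of
// three states: empty, registered (holds an nmethod), or unregistered (a
// tombstone left by unregister_entry() so that probe chains remain unbroken
// until the table is rebuilt).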
bool ZNMethodTable::register_entry(ZNMethodTableEntry* table, size_t size, nmethod* nm) {
  const ZNMethodTableEntry entry(nm);
  size_t index = first_index(nm, size);

  for (;;) {
    const ZNMethodTableEntry table_entry = table[index];

    if (!table_entry.registered() && !table_entry.unregistered()) {
      // Insert new entry
      table[index] = entry;
      return true;
    }

    if (table_entry.registered() && table_entry.method() == nm) {
      // Replace existing entry
      table[index] = entry;
      return false;
    }

    index = next_index(index, size);
  }
}

void ZNMethodTable::unregister_entry(ZNMethodTableEntry* table, size_t size, nmethod* nm) {
  size_t index = first_index(nm, size);

  for (;;) {
    const ZNMethodTableEntry table_entry = table[index];
    assert(table_entry.registered() || table_entry.unregistered(), "Entry not found");

    if (table_entry.registered() && table_entry.method() == nm) {
      // Remove entry
      table[index] = ZNMethodTableEntry(true /* unregistered */);
      return;
    }

    index = next_index(index, size);
  }
}

void ZNMethodTable::rebuild(size_t new_size) {
  assert(CodeCache_lock->owned_by_self(), "Lock must be held");

  assert(is_power_of_2(new_size), "Invalid size");

  log_debug(gc, nmethod)("Rebuilding NMethod Table: "
                         SIZE_FORMAT "->" SIZE_FORMAT " entries, "
                         SIZE_FORMAT "(%.0lf%%->%.0lf%%) registered, "
                         SIZE_FORMAT "(%.0lf%%->%.0lf%%) unregistered",
                         _size, new_size,
                         _nregistered, percent_of(_nregistered, _size), percent_of(_nregistered, new_size),
                         _nunregistered, percent_of(_nunregistered, _size), 0.0);

  // Allocate new table
  ZNMethodTableEntry* const new_table = new ZNMethodTableEntry[new_size];

  // Transfer all registered entries
  for (size_t i = 0; i < _size; i++) {
    const ZNMethodTableEntry entry = _table[i];
    if (entry.registered()) {
      register_entry(new_table, new_size, entry.method());
    }
  }

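  // If an iteration is in progress, the old table is still in use as the
  // iteration snapshot and will instead be deleted by nmethods_do_end().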
  if (_iter_table != _table) {
    // Delete old table
    delete [] _table;
  }

  // Install new table
  _table = new_table;
  _size = new_size;
  _nunregistered = 0;
}

void ZNMethodTable::rebuild_if_needed() {
  // The hash table uses linear probing. To avoid wasting memory while
  // at the same time maintaining good hash collision behavior we want
  // to keep the table occupancy between 30% and 70%. The table always
  // grows/shrinks by doubling/halving its size. Pruning of unregistered
  // entries is done by rebuilding the table with or without resizing it.
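  //
  // For example, with _size == 2048 the thresholds below truncate to 614,
  // 1331 and 1433: the table shrinks to 1024 when fewer than 614 nmethods
  // are registered, and once registered plus unregistered entries exceed
  // 1433 it is either pruned at the same size (fewer than 1331 registered)
  // or grown to 4096.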
  const size_t min_size = 1024;
  const size_t shrink_threshold = _size * 0.30;
  const size_t prune_threshold = _size * 0.65;
  const size_t grow_threshold = _size * 0.70;

  if (_size == 0) {
    // Initialize table
    rebuild(min_size);
  } else if (_nregistered < shrink_threshold && _size > min_size) {
    // Shrink table
    rebuild(_size / 2);
  } else if (_nregistered + _nunregistered > grow_threshold) {
    // Prune or grow table
    if (_nregistered < prune_threshold) {
      // Prune table
      rebuild(_size);
    } else {
      // Grow table
      rebuild(_size * 2);
    }
  }
}

 339 
 340 void ZNMethodTable::log_register(const nmethod* nm) {
 341   LogTarget(Trace, gc, nmethod) log;
 342   if (!log.is_enabled()) {
 343     return;
 344   }
 345 
 346   const ZNMethodDataOops* const oops = gc_data(nm)->oops();
 347 
 348   log.print("Register NMethod: %s.%s (" PTR_FORMAT "), "
 349             "Compiler: %s, Oops: %d, ImmediateOops: " SIZE_FORMAT ", NonImmediateOops: %s",
 350             nm->method()->method_holder()->external_name(),
 351             nm->method()->name()->as_C_string(),
 352             p2i(nm),
 353             nm->compiler_name(),
 354             nm->oops_count() - 1,
 355             oops->immediates_count(),
 356             oops->has_non_immediates() ? "Yes" : "No");
 357 
 358   LogTarget(Trace, gc, nmethod, oops) log_oops;
 359   if (!log_oops.is_enabled()) {
 360     return;
 361   }
 362 
 363   // Print nmethod oops table
 364   oop* const begin = nm->oops_begin();
 365   oop* const end = nm->oops_end();
 366   for (oop* p = begin; p < end; p++) {
 367     log_oops.print("           Oop[" SIZE_FORMAT "] " PTR_FORMAT " (%s)",
 368                    (p - begin), p2i(*p), (*p)->klass()->external_name());
 369   }
 370 
 371   // Print nmethod immediate oops
 372   if (oops->immediates_count() > 0) {
 373     oop** const begin = oops->immediates_begin();
 374     oop** const end = oops->immediates_end();
 375     for (oop** p = begin; p < end; p++) {
 376       log_oops.print("  ImmediateOop[" SIZE_FORMAT "] " PTR_FORMAT " @ " PTR_FORMAT " (%s)",
 377                      (p - begin), p2i(**p), p2i(*p), (**p)->klass()->external_name());
 378     }
 379   }
 380 }
 381 
 382 void ZNMethodTable::log_unregister(const nmethod* nm) {
 383   LogTarget(Debug, gc, nmethod) log;
 384   if (!log.is_enabled()) {
 385     return;
 386   }
 387 
 388   log.print("Unregister NMethod: %s.%s (" PTR_FORMAT ")",
 389             nm->method()->method_holder()->external_name(),
 390             nm->method()->name()->as_C_string(),
 391             p2i(nm));
 392 }
 393 
 394 size_t ZNMethodTable::registered_nmethods() {
 395   return _nregistered;
 396 }
 397 
 398 size_t ZNMethodTable::unregistered_nmethods() {
 399   return _nunregistered;
 400 }
 401 
 402 void ZNMethodTable::register_nmethod(nmethod* nm) {
 403   assert(CodeCache_lock->owned_by_self(), "Lock must be held");
 404   ResourceMark rm;
 405 
 406   // Grow/Shrink/Prune table if needed
 407   rebuild_if_needed();
 408 
 409   // Create and attach gc data
 410   attach_gc_data(nm);
 411 
 412   log_register(nm);
 413 
  // Insert new entry
  if (register_entry(_table, _size, nm)) {
    // New entry registered. If register_entry() instead returns false,
    // the nmethod was already present in the table, in which case the
    // number of registered entries should not be increased.
    _nregistered++;
  }

  // Disarm nmethod entry barrier
  disarm_nmethod(nm);
}

void ZNMethodTable::wait_until_iteration_done() {
  assert(CodeCache_lock->owned_by_self(), "Lock must be held");

  while (_iter_table != NULL) {
    CodeCache_lock->wait(Monitor::_no_safepoint_check_flag);
  }
}

void ZNMethodTable::unregister_nmethod(nmethod* nm) {
  assert(CodeCache_lock->owned_by_self(), "Lock must be held");

  if (Thread::current()->is_Code_cache_sweeper_thread()) {
    // The sweeper must wait for any ongoing iteration to complete
    // before it can unregister an nmethod.
    ZNMethodTable::wait_until_iteration_done();
  }

  ResourceMark rm;

  log_unregister(nm);

  // Remove entry
  unregister_entry(_table, _size, nm);
  _nunregistered++;
  _nregistered--;

  detach_gc_data(nm);
}

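// Disarms the nmethod entry barrier, if the configured barrier set uses one,
// so that subsequent entries into the nmethod bypass the barrier's slow path.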
void ZNMethodTable::disarm_nmethod(nmethod* nm) {
  BarrierSetNMethod* const bs = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (bs != NULL) {
    bs->disarm(nm);
  }
}

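// Iteration protocol: nmethods_do_begin() snapshots the current table under
// the CodeCache_lock and arms deferred freeing, GC workers then claim and
// process partitions of the snapshot via nmethods_do(), and finally
// nmethods_do_end() releases the snapshot, processes deferred frees and
// notifies threads waiting in wait_until_iteration_done().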
void ZNMethodTable::nmethods_do_begin() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  // Make sure we don't free data while iterating
  ZNMethodAllocator::activate_deferred_frees();

  // Prepare iteration
  _iter_table = _table;
  _iter_table_size = _size;
  _claimed = 0;
}

void ZNMethodTable::nmethods_do_end() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  // Finish iteration
  if (_iter_table != _table) {
    delete [] _iter_table;
  }
  _iter_table = NULL;

  assert(_claimed >= _iter_table_size, "Failed to claim all table entries");

  // Process deferred frees
  ZNMethodAllocator::deactivate_and_process_deferred_frees();

  // Notify iteration done
  CodeCache_lock->notify_all();
}

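// Processes all oops in the nmethod: the closure is applied to the entries
// in the oops table and to the immediate oop locations recorded in the
// ZNMethodDataOops, after which any non-immediate oops in the code stream
// are re-patched from the (now healed) oops table by fix_oop_relocations().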
void ZNMethodTable::oops_do(nmethod* nm, OopClosure* cl) {
  // Process oops table
  oop* const begin = nm->oops_begin();
  oop* const end = nm->oops_end();
  for (oop* p = begin; p < end; p++) {
    if (*p != Universe::non_oop_word()) {
      cl->do_oop(p);
    }
  }

  ZNMethodDataOops* const oops = gc_data(nm)->oops();

  // Process immediate oops
  if (oops->immediates_count() > 0) {
    oop** const begin = oops->immediates_begin();
    oop** const end = oops->immediates_end();
    for (oop** p = begin; p < end; p++) {
      if (**p != Universe::non_oop_word()) {
        cl->do_oop(*p);
      }
    }
  }

  // Process non-immediate oops
  if (oops->has_non_immediates()) {
    nm->fix_oop_relocations();
  }
}

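// Adapter closure that applies an OopClosure to every oop in each visited
// nmethod.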
class ZNMethodToOopsDo : public ZNMethodClosure {
private:
  OopClosure* _cl;

public:
  ZNMethodToOopsDo(OopClosure* cl) :
      _cl(cl) {}

  void do_nmethod(nmethod* nm) {
    ZNMethodTable::oops_do(nm, _cl);
  }
};

void ZNMethodTable::oops_do(OopClosure* cl) {
  ZNMethodToOopsDo nm_cl(cl);
  nmethods_do(&nm_cl);
}

void ZNMethodTable::nmethods_do(ZNMethodClosure* cl) {
  for (;;) {
    // Claim table partition. Each partition is currently sized to span
    // two cache lines. This number is just a guess, but seems to work well.
    const size_t partition_size = (ZCacheLineSize * 2) / sizeof(ZNMethodTableEntry);
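    // Atomic::add() returns the updated value of _claimed, so subtracting
    // partition_size yields the start of the partition claimed by this
    // thread; the MIN2 clamps both bounds once the table has been exhausted.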
    const size_t partition_start = MIN2(Atomic::add(partition_size, &_claimed) - partition_size, _iter_table_size);
    const size_t partition_end = MIN2(partition_start + partition_size, _iter_table_size);
    if (partition_start == partition_end) {
      // End of table
      break;
    }

    // Process table partition
    for (size_t i = partition_start; i < partition_end; i++) {
      const ZNMethodTableEntry entry = _iter_table[i];
      if (entry.registered()) {
        cl->do_nmethod(entry.method());
      }
    }
  }
}

class ZNMethodTableUnlinkClosure : public ZNMethodClosure {
private:
  bool          _unloading_occurred;
  volatile bool _failed;

  void set_failed() {
    Atomic::store(true, &_failed);
  }

public:
  ZNMethodTableUnlinkClosure(bool unloading_occurred) :
      _unloading_occurred(unloading_occurred),
      _failed(false) {}

  virtual void do_nmethod(nmethod* nm) {
    if (failed()) {
      return;
    }

    if (!nm->is_alive()) {
      return;
    }

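    // Take the per-nmethod lock to synchronize with the nmethod entry
    // barrier, which heals oops in this nmethod under the same lock.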
    ZLocker<ZReentrantLock> locker(ZNMethodTable::lock_for_nmethod(nm));

    if (nm->is_unloading()) {
      // Unlinking of the dependencies must happen before the
      // handshake separating unlink and purge.
      nm->flush_dependencies(false /* delete_immediately */);

      // We don't need to take the lock when unlinking nmethods from
      // the Method, because it is only concurrently unlinked by
      // the entry barrier, which acquires the per-nmethod lock.
      nm->unlink_from_method(false /* acquire_lock */);
      return;
    }

    // Heal oops and disarm
    ZNMethodOopClosure cl;
    ZNMethodTable::oops_do(nm, &cl);
    ZNMethodTable::disarm_nmethod(nm);

    // Clear compiled ICs and exception caches
    if (!nm->unload_nmethod_caches(_unloading_occurred)) {
      set_failed();
    }
  }

  bool failed() const {
    return Atomic::load(&_failed);
  }
};

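// The unlink task brackets the parallel iteration: the constructor calls
// nmethods_do_begin() and the destructor calls nmethods_do_end(), so the
// table snapshot stays valid for exactly as long as the task is alive.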
class ZNMethodTableUnlinkTask : public ZTask {
private:
  ZNMethodTableUnlinkClosure _cl;
  ICRefillVerifier*          _verifier;

public:
  ZNMethodTableUnlinkTask(bool unloading_occurred, ICRefillVerifier* verifier) :
      ZTask("ZNMethodTableUnlinkTask"),
      _cl(unloading_occurred),
      _verifier(verifier) {
    ZNMethodTable::nmethods_do_begin();
  }

  ~ZNMethodTableUnlinkTask() {
    ZNMethodTable::nmethods_do_end();
  }

  virtual void work() {
    ICRefillVerifierMark mark(_verifier);
    ZNMethodTable::nmethods_do(&_cl);
  }

  bool success() const {
    return !_cl.failed();
  }
};

void ZNMethodTable::unlink(ZWorkers* workers, bool unloading_occurred) {
  for (;;) {
    ICRefillVerifier verifier;

    {
      ZNMethodTableUnlinkTask task(unloading_occurred, &verifier);
      workers->run_concurrent(&task);
      if (task.success()) {
        return;
      }
    }

    // Cleaning failed because we ran out of transitional IC stubs,
    // so we have to refill and try again. Refilling requires taking
    // a safepoint, so we temporarily leave the suspendible thread set.
    SuspendibleThreadSetLeaver sts;
    InlineCacheBuffer::refill_ic_stubs();
  }
}

class ZNMethodTablePurgeClosure : public ZNMethodClosure {
public:
  virtual void do_nmethod(nmethod* nm) {
    if (nm->is_alive() && nm->is_unloading()) {
      nm->make_unloaded();
    }
  }
};

class ZNMethodTablePurgeTask : public ZTask {
private:
  ZNMethodTablePurgeClosure _cl;

public:
  ZNMethodTablePurgeTask() :
      ZTask("ZNMethodTablePurgeTask"),
      _cl() {
    ZNMethodTable::nmethods_do_begin();
  }

  ~ZNMethodTablePurgeTask() {
    ZNMethodTable::nmethods_do_end();
  }

  virtual void work() {
    ZNMethodTable::nmethods_do(&_cl);
  }
};

void ZNMethodTable::purge(ZWorkers* workers) {
  ZNMethodTablePurgeTask task;
  workers->run_concurrent(&task);
}