1 /*
   2  * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "code/relocInfo.hpp"
  26 #include "code/nmethod.hpp"
  27 #include "code/icBuffer.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/barrierSetNMethod.hpp"
  30 #include "gc/z/zArray.inline.hpp"
  31 #include "gc/z/zGlobals.hpp"
  32 #include "gc/z/zHash.inline.hpp"
  33 #include "gc/z/zLock.inline.hpp"
  34 #include "gc/z/zNMethodTable.hpp"
  35 #include "gc/z/zOopClosures.inline.hpp"
  36 #include "gc/z/zTask.hpp"
  37 #include "gc/z/zWorkers.hpp"
  38 #include "logging/log.hpp"
  39 #include "memory/allocation.inline.hpp"
  40 #include "memory/resourceArea.hpp"
  41 #include "runtime/atomic.hpp"
  42 #include "runtime/orderAccess.hpp"
  43 #include "utilities/debug.hpp"
  44 
// Variable-sized record holding the addresses of an nmethod's immediate
// oops. The oop* array is co-allocated in the same C-heap chunk, placed
// directly after this header (see create() and immediate_oops_begin()).
class ZNMethodDataImmediateOops {
private:
  const size_t _nimmediate_oops;  // Number of entries in the trailing oop* array

  // Size of this header; the trailing array starts at this offset
  static size_t header_size();

  ZNMethodDataImmediateOops(const GrowableArray<oop*>& immediate_oops);

public:
  // Allocates header plus trailing oop* array as one chunk and copies
  // the given immediate oop addresses into it
  static ZNMethodDataImmediateOops* create(const GrowableArray<oop*>& immediate_oops);
  // Deletes (or defers deletion of) a record via ZNMethodTable::safe_delete()
  static void destroy(ZNMethodDataImmediateOops* data_immediate_oops);

  size_t immediate_oops_count() const;
  oop** immediate_oops_begin() const;
  oop** immediate_oops_end() const;
};
  61 
  62 size_t ZNMethodDataImmediateOops::header_size() {
  63   const size_t size = sizeof(ZNMethodDataImmediateOops);
  64   assert(is_aligned(size, sizeof(oop*)), "Header misaligned");
  65   return size;
  66 }
  67 
  68 ZNMethodDataImmediateOops* ZNMethodDataImmediateOops::create(const GrowableArray<oop*>& immediate_oops) {
  69   // Allocate memory for the ZNMethodDataImmediateOops object
  70   // plus the immediate oop* array that follows right after.
  71   const size_t size = ZNMethodDataImmediateOops::header_size() + (sizeof(oop*) * immediate_oops.length());
  72   void* const data_immediate_oops = NEW_C_HEAP_ARRAY(uint8_t, size, mtGC);
  73   return ::new (data_immediate_oops) ZNMethodDataImmediateOops(immediate_oops);
  74 }
  75 
void ZNMethodDataImmediateOops::destroy(ZNMethodDataImmediateOops* data_immediate_oops) {
  // Deletion must be deferred while a table iteration is in progress,
  // since concurrent iterators may still be reading this record;
  // safe_delete() handles both cases (and tolerates NULL).
  ZNMethodTable::safe_delete(data_immediate_oops);
}
  79 
  80 ZNMethodDataImmediateOops::ZNMethodDataImmediateOops(const GrowableArray<oop*>& immediate_oops) :
  81     _nimmediate_oops(immediate_oops.length()) {
  82   // Save all immediate oops
  83   for (size_t i = 0; i < _nimmediate_oops; i++) {
  84     immediate_oops_begin()[i] = immediate_oops.at(i);
  85   }
  86 }
  87 
// Number of entries in the trailing oop* array
size_t ZNMethodDataImmediateOops::immediate_oops_count() const {
  return _nimmediate_oops;
}
  91 
oop** ZNMethodDataImmediateOops::immediate_oops_begin() const {
  // The immediate oop* array starts immediately after this object
  // (co-allocated in the same chunk by create())
  return (oop**)((uintptr_t)this + header_size());
}
  96 
  97 oop** ZNMethodDataImmediateOops::immediate_oops_end() const {
  98   return immediate_oops_begin() + immediate_oops_count();
  99 }
 100 
// Per-nmethod GC data attached via nmethod::set_gc_data: a per-nmethod
// lock plus an (atomically swappable) record of the nmethod's
// immediate oops.
class ZNMethodData {
private:
  ZReentrantLock                      _lock;            // Per-nmethod lock (see lock_for_nmethod)
  ZNMethodDataImmediateOops* volatile _immediate_oops;  // Published with release/acquire semantics

  ZNMethodData(nmethod* nm);

public:
  static ZNMethodData* create(nmethod* nm);
  static void destroy(ZNMethodData* data);

  ZReentrantLock* lock();

  // Acquire-load of the current immediate oops record (may be NULL)
  ZNMethodDataImmediateOops* immediate_oops() const;
  // Atomically installs a new record built from immediate_oops and
  // returns the previous record (may be NULL)
  ZNMethodDataImmediateOops* swap_immediate_oops(const GrowableArray<oop*>& immediate_oops);
};
 117 
 118 ZNMethodData* ZNMethodData::create(nmethod* nm) {
 119   void* const method = NEW_C_HEAP_ARRAY(uint8_t, sizeof(ZNMethodData), mtGC);
 120   return ::new (method) ZNMethodData(nm);
 121 }
 122 
void ZNMethodData::destroy(ZNMethodData* data) {
  // Free the attached immediate oops record first, then the data itself.
  // Both go through safe_delete(), which defers freeing while a table
  // iteration is in progress.
  ZNMethodDataImmediateOops::destroy(data->immediate_oops());
  ZNMethodTable::safe_delete(data);
}
 127 
// NOTE(review): the nm parameter is currently unused here — presumably
// kept for interface symmetry with create(); confirm before removing.
ZNMethodData::ZNMethodData(nmethod* nm) :
    _lock(),
    _immediate_oops(NULL) {}
 131 
// The per-nmethod lock, used to serialize nmethod processing/unlinking
ZReentrantLock* ZNMethodData::lock() {
  return &_lock;
}
 135 
ZNMethodDataImmediateOops* ZNMethodData::immediate_oops() const {
  // Acquire-load so readers observe a fully constructed record
  // published by swap_immediate_oops()
  return OrderAccess::load_acquire(&_immediate_oops);
}
 139 
ZNMethodDataImmediateOops* ZNMethodData::swap_immediate_oops(const GrowableArray<oop*>& immediate_oops) {
  // Build the new record first (NULL if there are no immediate oops),
  // then atomically install it, handing the old record back to the
  // caller for destruction.
  ZNMethodDataImmediateOops* const data_immediate_oops =
    immediate_oops.is_empty() ? NULL : ZNMethodDataImmediateOops::create(immediate_oops);
  return Atomic::xchg(data_immediate_oops, &_immediate_oops);
}
 145 
// Fetch the ZGC data attached to an nmethod (NULL if none attached)
static ZNMethodData* gc_data(const nmethod* nm) {
  return nm->gc_data<ZNMethodData>();
}
 149 
 150 static void set_gc_data(nmethod* nm, ZNMethodData* data) {
 151   return nm->set_gc_data<ZNMethodData>(data);
 152 }
 153 
ZNMethodTableEntry* ZNMethodTable::_table = NULL;        // Current hash table (linear probing)
size_t ZNMethodTable::_size = 0;                         // Current table capacity (power of two)
ZLock ZNMethodTable::_iter_lock;                         // Guards _iter_* state and deferred deletes
ZNMethodTableEntry* ZNMethodTable::_iter_table = NULL;   // Table snapshot being iterated (NULL if none)
size_t ZNMethodTable::_iter_table_size = 0;              // Capacity of the iteration snapshot
ZArray<void*> ZNMethodTable::_iter_deferred_deletes;     // Chunks to free once iteration completes
size_t ZNMethodTable::_nregistered = 0;                  // Number of registered entries
size_t ZNMethodTable::_nunregistered = 0;                // Number of unregistered (tombstone) entries
volatile size_t ZNMethodTable::_claimed = 0;             // Next unclaimed index for parallel iteration
 163 
 164 void ZNMethodTable::safe_delete(void* data) {
 165   if (data == NULL) {
 166     return;
 167   }
 168 
 169   ZLocker<ZLock> locker(&_iter_lock);
 170   if (_iter_table != NULL) {
 171     // Iteration in progress, defer delete
 172     _iter_deferred_deletes.add(data);
 173   } else {
 174     // Iteration not in progress, delete now
 175     FREE_C_HEAP_ARRAY(uint8_t, data);
 176   }
 177 }
 178 
// Build a table entry for nm: scan its relocations to collect the
// addresses of all non-NULL immediate oops and to detect whether it
// has any non-immediate oops, then attach/refresh that information
// as per-nmethod GC data.
ZNMethodTableEntry ZNMethodTable::create_entry(nmethod* nm) {
  GrowableArray<oop*> immediate_oops;
  bool non_immediate_oops = false;

  // Find all oops relocations
  RelocIterator iter(nm);
  while (iter.next()) {
    if (iter.type() != relocInfo::oop_type) {
      // Not an oop
      continue;
    }

    oop_Relocation* r = iter.oop_reloc();

    if (!r->oop_is_immediate()) {
      // Non-immediate oop found
      non_immediate_oops = true;
      continue;
    }

    if (r->oop_value() != NULL) {
      // Non-NULL immediate oop found. NULL oops can safely be
      // ignored since the method will be re-registered if they
      // are later patched to be non-NULL.
      immediate_oops.push(r->oop_addr());
    }
  }

  // Attach GC data to nmethod, creating it on first registration
  ZNMethodData* data = gc_data(nm);
  if (data == NULL) {
    data = ZNMethodData::create(nm);
    set_gc_data(nm, data);
  }

  // Attach immediate oops in GC data; the previous record (if any)
  // is destroyed via safe_delete so concurrent iterators stay valid
  ZNMethodDataImmediateOops* const old_data_immediate_oops = data->swap_immediate_oops(immediate_oops);
  ZNMethodDataImmediateOops::destroy(old_data_immediate_oops);

  // Create entry recording whether the nmethod has non-immediate
  // and/or immediate oops
  return ZNMethodTableEntry(nm, non_immediate_oops, !immediate_oops.is_empty());
}
 221 
 222 ZReentrantLock* ZNMethodTable::lock_for_nmethod(nmethod* nm) {
 223   ZNMethodData* const data = gc_data(nm);
 224   if (data == NULL) {
 225     return NULL;
 226   }
 227   return data->lock();
 228 }
 229 
 230 size_t ZNMethodTable::first_index(const nmethod* nm, size_t size) {
 231   assert(is_power_of_2(size), "Invalid size");
 232   const size_t mask = size - 1;
 233   const size_t hash = ZHash::address_to_uint32((uintptr_t)nm);
 234   return hash & mask;
 235 }
 236 
 237 size_t ZNMethodTable::next_index(size_t prev_index, size_t size) {
 238   assert(is_power_of_2(size), "Invalid size");
 239   const size_t mask = size - 1;
 240   return (prev_index + 1) & mask;
 241 }
 242 
 243 bool ZNMethodTable::register_entry(ZNMethodTableEntry* table, size_t size, ZNMethodTableEntry entry) {
 244   const nmethod* const nm = entry.method();
 245   size_t index = first_index(nm, size);
 246 
 247   for (;;) {
 248     const ZNMethodTableEntry table_entry = table[index];
 249 
 250     if (!table_entry.registered() && !table_entry.unregistered()) {
 251       // Insert new entry
 252       table[index] = entry;
 253       return true;
 254     }
 255 
 256     if (table_entry.registered() && table_entry.method() == nm) {
 257       // Replace existing entry
 258       table[index] = entry;
 259       return false;
 260     }
 261 
 262     index = next_index(index, size);
 263   }
 264 }
 265 
// Remove nm's entry from table by replacing it with a tombstone
// (unregistered marker), so probe chains through it remain intact.
// Also destroys and detaches the nmethod's GC data.
void ZNMethodTable::unregister_entry(ZNMethodTableEntry* table, size_t size, nmethod* nm) {
  if (size == 0) {
    // Table is empty
    return;
  }

  size_t index = first_index(nm, size);

  for (;;) {
    const ZNMethodTableEntry table_entry = table[index];
    // Hitting a completely free slot would mean nm was never inserted
    assert(table_entry.registered() || table_entry.unregistered(), "Entry not found");

    if (table_entry.registered() && table_entry.method() == nm) {
      // Remove entry by writing a tombstone
      table[index] = ZNMethodTableEntry(true /* unregistered */);

      // Destroy GC data (deferred if an iteration is in progress)
      ZNMethodData::destroy(gc_data(nm));
      set_gc_data(nm, NULL);
      return;
    }

    index = next_index(index, size);
  }
}
 291 
// Rebuild the table at new_size, transferring only registered entries
// (tombstones are pruned). Holds _iter_lock so the rebuild is atomic
// with respect to iteration setup/teardown.
void ZNMethodTable::rebuild(size_t new_size) {
  ZLocker<ZLock> locker(&_iter_lock);
  assert(is_power_of_2(new_size), "Invalid size");

  log_debug(gc, nmethod)("Rebuilding NMethod Table: "
                         SIZE_FORMAT "->" SIZE_FORMAT " entries, "
                         SIZE_FORMAT "(%.0lf%%->%.0lf%%) registered, "
                         SIZE_FORMAT "(%.0lf%%->%.0lf%%) unregistered",
                         _size, new_size,
                         _nregistered, percent_of(_nregistered, _size), percent_of(_nregistered, new_size),
                         _nunregistered, percent_of(_nunregistered, _size), 0.0);

  // Allocate new table
  ZNMethodTableEntry* const new_table = new ZNMethodTableEntry[new_size];

  // Transfer all registered entries; tombstones are dropped
  for (size_t i = 0; i < _size; i++) {
    const ZNMethodTableEntry entry = _table[i];
    if (entry.registered()) {
      register_entry(new_table, new_size, entry);
    }
  }

  if (_iter_table != _table) {
    // Delete old table now; if an iteration has snapshotted it
    // (_iter_table == _table), deletion happens in
    // nmethod_entries_do_end() instead
    delete [] _table;
  }

  // Install new table
  _table = new_table;
  _size = new_size;
  _nunregistered = 0;
}
 325 
 326 void ZNMethodTable::rebuild_if_needed() {
 327   // The hash table uses linear probing. To avoid wasting memory while
 328   // at the same time maintaining good hash collision behavior we want
 329   // to keep the table occupancy between 30% and 70%. The table always
 330   // grows/shrinks by doubling/halving its size. Pruning of unregistered
 331   // entries is done by rebuilding the table with or without resizing it.
 332   const size_t min_size = 1024;
 333   const size_t shrink_threshold = _size * 0.30;
 334   const size_t prune_threshold = _size * 0.65;
 335   const size_t grow_threshold = _size * 0.70;
 336 
 337   if (_size == 0) {
 338     // Initialize table
 339     rebuild(min_size);
 340   } else if (_nregistered < shrink_threshold && _size > min_size) {
 341     // Shrink table
 342     rebuild(_size / 2);
 343   } else if (_nregistered + _nunregistered > grow_threshold) {
 344     // Prune or grow table
 345     if (_nregistered < prune_threshold) {
 346       // Prune table
 347       rebuild(_size);
 348     } else {
 349       // Grow table
 350       rebuild(_size * 2);
 351     }
 352   }
 353 }
 354 
// Trace-log a registration: a one-line summary, plus (at the oops
// sub-tag) a dump of the nmethod's oops table and immediate oops.
void ZNMethodTable::log_register(const nmethod* nm, ZNMethodTableEntry entry) {
  LogTarget(Trace, gc, nmethod) log;
  if (!log.is_enabled()) {
    // Logging disabled, skip the (expensive) name formatting below
    return;
  }

  log.print("Register NMethod: %s.%s (" PTR_FORMAT "), "
            "Compiler: %s, Oops: %d, ImmediateOops: " SIZE_FORMAT ", NonImmediateOops: %s",
            nm->method()->method_holder()->external_name(),
            nm->method()->name()->as_C_string(),
            p2i(nm),
            nm->compiler_name(),
            nm->oops_count() - 1,
            entry.immediate_oops() ? gc_data(nm)->immediate_oops()->immediate_oops_count() : 0,
            entry.non_immediate_oops() ? "Yes" : "No");

  LogTarget(Trace, gc, nmethod, oops) log_oops;
  if (!log_oops.is_enabled()) {
    return;
  }

  // Print nmethod oops table
  oop* const begin = nm->oops_begin();
  oop* const end = nm->oops_end();
  for (oop* p = begin; p < end; p++) {
    log_oops.print("           Oop[" SIZE_FORMAT "] " PTR_FORMAT " (%s)",
                   (p - begin), p2i(*p), (*p)->klass()->external_name());
  }

  if (entry.immediate_oops()) {
    // Print nmethod immediate oops (record may be NULL if the swap
    // installed no immediate oops)
    const ZNMethodDataImmediateOops* const nmi = gc_data(nm)->immediate_oops();
    if (nmi != NULL) {
      oop** const begin = nmi->immediate_oops_begin();
      oop** const end = nmi->immediate_oops_end();
      for (oop** p = begin; p < end; p++) {
        log_oops.print("  ImmediateOop[" SIZE_FORMAT "] " PTR_FORMAT " @ " PTR_FORMAT " (%s)",
                       (p - begin), p2i(**p), p2i(*p), (**p)->klass()->external_name());
      }
    }
  }
}
 397 
// Debug-log an unregistration (method holder, name and address)
void ZNMethodTable::log_unregister(const nmethod* nm) {
  LogTarget(Debug, gc, nmethod) log;
  if (!log.is_enabled()) {
    // Logging disabled, skip name formatting
    return;
  }

  log.print("Unregister NMethod: %s.%s (" PTR_FORMAT ")",
            nm->method()->method_holder()->external_name(),
            nm->method()->name()->as_C_string(),
            p2i(nm));
}
 409 
// Number of currently registered nmethods
size_t ZNMethodTable::registered_nmethods() {
  return _nregistered;
}
 413 
// Number of tombstoned (unregistered) entries still in the table
size_t ZNMethodTable::unregistered_nmethods() {
  return _nunregistered;
}
 417 
// Register (or re-register) an nmethod. Caller must hold the
// CodeCache_lock. Re-registration replaces the existing entry and
// refreshes the recorded immediate oops.
void ZNMethodTable::register_nmethod(nmethod* nm) {
  assert(CodeCache_lock->owned_by_self(), "Lock must be held");
  ResourceMark rm;

  // Grow/Shrink/Prune table if needed
  rebuild_if_needed();

  // Create entry
  const ZNMethodTableEntry entry = create_entry(nm);

  log_register(nm, entry);

  // Insert new entry
  if (register_entry(_table, _size, entry)) {
    // New entry registered. When register_entry() instead returns
    // false the nmethod was already in the table so we do not want
    // to increase number of registered entries in that case.
    _nregistered++;
  }

  // Disarm nmethod entry barrier
  disarm_nmethod(nm);
}
 441 
// Block on the CodeCache_lock until no table iteration is in progress.
// Woken by the notify_all() in nmethod_entries_do_end().
void ZNMethodTable::wait_until_iteration_done() {
  assert(CodeCache_lock->owned_by_self(), "Lock must be held");

  while (_iter_table != NULL) {
    CodeCache_lock->wait(Monitor::_no_safepoint_check_flag);
  }
}
 449 
// Unregister an nmethod. Caller must hold the CodeCache_lock.
void ZNMethodTable::unregister_nmethod(nmethod* nm) {
  assert(CodeCache_lock->owned_by_self(), "Lock must be held");

  if (Thread::current()->is_Code_cache_sweeper_thread()) {
    // The sweeper must wait for any ongoing iteration to complete
    // before it can unregister an nmethod.
    ZNMethodTable::wait_until_iteration_done();
  }

  ResourceMark rm;

  log_unregister(nm);

  // Remove entry (leaves a tombstone and frees the GC data)
  unregister_entry(_table, _size, nm);
  _nunregistered++;
  _nregistered--;
}
 468 
// Disarm the nmethod's entry barrier, if a barrier-set-nmethod exists
void ZNMethodTable::disarm_nmethod(nmethod* nm) {
  BarrierSetNMethod* const bs = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (bs != NULL) {
    bs->disarm(nm);
  }
}
 475 
 476 void ZNMethodTable::nmethod_entries_do_begin() {
 477   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 478   ZLocker<ZLock> locker(&_iter_lock);
 479 
 480   // Prepare iteration
 481   _iter_table = _table;
 482   _iter_table_size = _size;
 483   _claimed = 0;
 484   assert(_iter_deferred_deletes.is_empty(), "Should be emtpy");
 485 }
 486 
// Finish a table iteration: free the snapshot if the table was
// rebuilt meanwhile, process any deletes deferred by safe_delete(),
// and wake threads blocked in wait_until_iteration_done().
void ZNMethodTable::nmethod_entries_do_end() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  ZLocker<ZLock> locker(&_iter_lock);

  // Finish iteration. If a rebuild happened during the iteration the
  // snapshot is now stale and must be freed here (rebuild() skipped it).
  if (_iter_table != _table) {
    delete [] _iter_table;
  }
  _iter_table = NULL;

  assert(_claimed >= _iter_table_size, "Failed to claim all table entries");

  // Process deferred deletes
  ZArrayIterator<void*> iter(&_iter_deferred_deletes);
  for (void* data; iter.next(&data);) {
    FREE_C_HEAP_ARRAY(uint8_t, data);
  }
  _iter_deferred_deletes.clear();

  // Notify iteration done
  CodeCache_lock->notify_all();
}
 509 
 510 void ZNMethodTable::entry_oops_do(ZNMethodTableEntry entry, OopClosure* cl) {
 511   nmethod* const nm = entry.method();
 512 
 513   // Process oops table
 514   oop* const begin = nm->oops_begin();
 515   oop* const end = nm->oops_end();
 516   for (oop* p = begin; p < end; p++) {
 517     if (*p != Universe::non_oop_word()) {
 518       cl->do_oop(p);
 519     }
 520   }
 521 
 522   // Process immediate oops
 523   if (entry.immediate_oops()) {
 524     const ZNMethodDataImmediateOops* const nmi = gc_data(nm)->immediate_oops();
 525     if (nmi != NULL) {
 526       oop** const begin = nmi->immediate_oops_begin();
 527       oop** const end = nmi->immediate_oops_end();
 528       for (oop** p = begin; p < end; p++) {
 529         if (**p != Universe::non_oop_word()) {
 530           cl->do_oop(*p);
 531         }
 532       }
 533     }
 534   }
 535 
 536   // Process non-immediate oops
 537   if (entry.non_immediate_oops()) {
 538     nmethod* const nm = entry.method();
 539     nm->fix_oop_relocations();
 540   }
 541 }
 542 
 543 class ZNMethodTableEntryToOopsDo : public ZNMethodTableEntryClosure {
 544 private:
 545   OopClosure* _cl;
 546 
 547 public:
 548   ZNMethodTableEntryToOopsDo(OopClosure* cl) :
 549       _cl(cl) {}
 550 
 551   void do_nmethod_entry(ZNMethodTableEntry entry) {
 552     ZNMethodTable::entry_oops_do(entry, _cl);
 553   }
 554 };
 555 
// Apply cl to the oops of all registered nmethods in the iteration
// snapshot (callable in parallel by multiple worker threads)
void ZNMethodTable::oops_do(OopClosure* cl) {
  ZNMethodTableEntryToOopsDo entry_cl(cl);
  nmethod_entries_do(&entry_cl);
}
 560 
// Visit all registered entries in the iteration snapshot. Safe to
// call from multiple threads concurrently: partitions are claimed
// lock-free via an atomic add on _claimed. Must be bracketed by
// nmethod_entries_do_begin()/_end().
void ZNMethodTable::nmethod_entries_do(ZNMethodTableEntryClosure* cl) {
  for (;;) {
    // Claim table partition. Each partition is currently sized to span
    // two cache lines. This number is just a guess, but seems to work well.
    const size_t partition_size = (ZCacheLineSize * 2) / sizeof(ZNMethodTableEntry);
    // Atomic::add returns the post-add value; subtracting partition_size
    // recovers this thread's exclusive partition start
    const size_t partition_start = MIN2(Atomic::add(partition_size, &_claimed) - partition_size, _iter_table_size);
    const size_t partition_end = MIN2(partition_start + partition_size, _iter_table_size);
    if (partition_start == partition_end) {
      // End of table
      break;
    }

    // Process table partition, visiting only registered entries
    for (size_t i = partition_start; i < partition_end; i++) {
      const ZNMethodTableEntry entry = _iter_table[i];
      if (entry.registered()) {
        cl->do_nmethod_entry(entry);
      }
    }
  }
}
 582 
// Closure run during class unloading: for each alive nmethod, either
// unlinks it (if it is unloading) or heals its oops, disarms it and
// clears its caches. Sets a shared failure flag when IC stub space
// runs out, so all workers bail out and the pass can be retried.
class ZNMethodTableUnlinkClosure : public ZNMethodTableEntryClosure {
private:
  bool          _unloading_occurred;  // Passed through to unload_nmethod_caches()
  volatile bool _failed;              // Shared across workers, set on IC stub exhaustion

  void set_failed() {
    Atomic::store(true, &_failed);
  }

public:
  ZNMethodTableUnlinkClosure(bool unloading_occurred) :
      _unloading_occurred(unloading_occurred),
      _failed(false) {}

  virtual void do_nmethod_entry(ZNMethodTableEntry entry) {
    if (failed()) {
      // Another worker already failed; stop doing work
      return;
    }

    nmethod* const nm = entry.method();
    if (!nm->is_alive()) {
      return;
    }

    // Serialize against concurrent processing of this nmethod
    ZLocker<ZReentrantLock> locker(ZNMethodTable::lock_for_nmethod(nm));

    if (nm->is_unloading()) {
      // Unlinking of the dependencies must happen before the
      // handshake separating unlink and purge.
      nm->flush_dependencies(false /* delete_immediately */);

      // We don't need to take the lock when unlinking nmethods from
      // the Method, because it is only concurrently unlinked by
      // the entry barrier, which acquires the per nmethod lock.
      nm->unlink_from_method(false /* acquire_lock */);
      return;
    }

    // Heal oops and disarm
    ZNMethodOopClosure cl;
    ZNMethodTable::entry_oops_do(entry, &cl);
    ZNMethodTable::disarm_nmethod(nm);

    // Clear compiled ICs and exception caches; failure indicates
    // we ran out of transitional IC stubs (see ZNMethodTable::unlink)
    if (!nm->unload_nmethod_caches(_unloading_occurred)) {
      set_failed();
    }
  }

  bool failed() const {
    return Atomic::load(&_failed);
  }
};
 636 
// Worker task running ZNMethodTableUnlinkClosure over the table.
// The constructor/destructor bracket the iteration with
// nmethod_entries_do_begin()/_end().
class ZNMethodTableUnlinkTask : public ZTask {
private:
  ZNMethodTableUnlinkClosure _cl;        // Shared by all workers
  ICRefillVerifier*          _verifier;  // Verifies IC stub refill requirements

public:
  ZNMethodTableUnlinkTask(bool unloading_occurred, ICRefillVerifier* verifier) :
      ZTask("ZNMethodTableUnlinkTask"),
      _cl(unloading_occurred),
      _verifier(verifier) {
    ZNMethodTable::nmethod_entries_do_begin();
  }

  ~ZNMethodTableUnlinkTask() {
    ZNMethodTable::nmethod_entries_do_end();
  }

  virtual void work() {
    ICRefillVerifierMark mark(_verifier);
    ZNMethodTable::nmethod_entries_do(&_cl);
  }

  // True if no worker ran out of IC stubs
  bool success() const {
    return !_cl.failed();
  }
};
 663 
// Run the unlink pass over all nmethods, retrying with freshly
// refilled IC stubs until it succeeds.
void ZNMethodTable::unlink(ZWorkers* workers, bool unloading_occurred) {
  for (;;) {
    ICRefillVerifier verifier;

    {
      // Scope the task so its destructor ends the iteration before
      // we potentially leave the suspendible thread set below
      ZNMethodTableUnlinkTask task(unloading_occurred, &verifier);
      workers->run_concurrent(&task);
      if (task.success()) {
        return;
      }
    }

    // Cleaning failed because we ran out of transitional IC stubs,
    // so we have to refill and try again. Refilling requires taking
    // a safepoint, so we temporarily leave the suspendible thread set.
    SuspendibleThreadSetLeaver sts;
    InlineCacheBuffer::refill_ic_stubs();
  }
}
 683 
// Closure for the purge pass: makes every alive-but-unloading
// nmethod unloaded
class ZNMethodTablePurgeClosure : public ZNMethodTableEntryClosure {
public:
  virtual void do_nmethod_entry(ZNMethodTableEntry entry) {
    nmethod* const nm = entry.method();
    if (nm->is_alive() && nm->is_unloading()) {
      nm->make_unloaded();
    }
  }
};
 693 
// Worker task running ZNMethodTablePurgeClosure over the table.
// The constructor/destructor bracket the iteration with
// nmethod_entries_do_begin()/_end().
class ZNMethodTablePurgeTask : public ZTask {
private:
  ZNMethodTablePurgeClosure _cl;  // Shared by all workers

public:
  ZNMethodTablePurgeTask() :
      ZTask("ZNMethodTablePurgeTask"),
      _cl() {
    ZNMethodTable::nmethod_entries_do_begin();
  }

  ~ZNMethodTablePurgeTask() {
    ZNMethodTable::nmethod_entries_do_end();
  }

  virtual void work() {
    ZNMethodTable::nmethod_entries_do(&_cl);
  }
};
 713 
// Run the purge pass over all nmethods using the given worker threads
void ZNMethodTable::purge(ZWorkers* workers) {
  ZNMethodTablePurgeTask task;
  workers->run_concurrent(&task);
}