1 /*
   2  * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "code/relocInfo.hpp"
  26 #include "code/nmethod.hpp"
  27 #include "code/icBuffer.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/barrierSetNMethod.hpp"
  30 #include "gc/z/zArray.inline.hpp"
  31 #include "gc/z/zGlobals.hpp"
  32 #include "gc/z/zHash.inline.hpp"
  33 #include "gc/z/zLock.inline.hpp"
  34 #include "gc/z/zNMethodTable.hpp"
  35 #include "gc/z/zOopClosures.inline.hpp"
  36 #include "gc/z/zTask.hpp"
  37 #include "gc/z/zWorkers.hpp"
  38 #include "logging/log.hpp"
  39 #include "memory/allocation.inline.hpp"
  40 #include "memory/resourceArea.hpp"
  41 #include "runtime/atomic.hpp"
  42 #include "runtime/orderAccess.hpp"
  43 #include "runtime/os.hpp"
  44 #include "utilities/debug.hpp"
  45 
// Immutable holder for the addresses of an nmethod's immediate oops.
// The oop* array is co-allocated in the same C-heap chunk, directly
// after this header object (see create() and immediate_oops_begin()).
class ZNMethodDataImmediateOops {
private:
  // Number of oop* entries in the trailing array
  const size_t _nimmediate_oops;

  static size_t header_size();

  ZNMethodDataImmediateOops(const GrowableArray<oop*>& immediate_oops);

public:
  // Allocates header plus trailing oop* array as a single chunk
  static ZNMethodDataImmediateOops* create(const GrowableArray<oop*>& immediate_oops);
  // Frees (or defers freeing of) the chunk via ZNMethodTable::safe_delete()
  static void destroy(ZNMethodDataImmediateOops* data_immediate_oops);

  size_t immediate_oops_count() const;
  oop** immediate_oops_begin() const;
  oop** immediate_oops_end() const;
};
  62 
  63 size_t ZNMethodDataImmediateOops::header_size() {
  64   const size_t size = sizeof(ZNMethodDataImmediateOops);
  65   assert(is_aligned(size, sizeof(oop*)), "Header misaligned");
  66   return size;
  67 }
  68 
  69 ZNMethodDataImmediateOops* ZNMethodDataImmediateOops::create(const GrowableArray<oop*>& immediate_oops) {
  70   // Allocate memory for the ZNMethodDataImmediateOops object
  71   // plus the immediate oop* array that follows right after.
  72   const size_t size = ZNMethodDataImmediateOops::header_size() + (sizeof(oop*) * immediate_oops.length());
  73   void* const data_immediate_oops = NEW_C_HEAP_ARRAY(uint8_t, size, mtGC);
  74   return ::new (data_immediate_oops) ZNMethodDataImmediateOops(immediate_oops);
  75 }
  76 
void ZNMethodDataImmediateOops::destroy(ZNMethodDataImmediateOops* data_immediate_oops) {
  // safe_delete() defers the actual free if a table iteration is in
  // progress, since concurrent iterators may still be reading this data.
  ZNMethodTable::safe_delete(data_immediate_oops);
}
  80 
ZNMethodDataImmediateOops::ZNMethodDataImmediateOops(const GrowableArray<oop*>& immediate_oops) :
    _nimmediate_oops(immediate_oops.length()) {
  // Save all immediate oops by copying their addresses into the
  // trailing array that create() allocated right after this object
  for (size_t i = 0; i < _nimmediate_oops; i++) {
    immediate_oops_begin()[i] = immediate_oops.at(i);
  }
}
  88 
// Number of entries in the trailing immediate oop* array
size_t ZNMethodDataImmediateOops::immediate_oops_count() const {
  return _nimmediate_oops;
}
  92 
  93 oop** ZNMethodDataImmediateOops::immediate_oops_begin() const {
  94   // The immediate oop* array starts immediately after this object
  95   return (oop**)((uintptr_t)this + header_size());
  96 }
  97 
// One-past-the-end pointer of the trailing immediate oop* array
oop** ZNMethodDataImmediateOops::immediate_oops_end() const {
  return immediate_oops_begin() + immediate_oops_count();
}
 101 
// Per-nmethod GC data attached via nmethod::set_gc_data(). Holds the
// per-nmethod lock and a pointer to the current immediate oops array.
class ZNMethodData {
private:
  // Per-nmethod lock, see ZNMethodTable::lock_for_nmethod()
  ZReentrantLock                      _lock;
  // Current immediate oops; read with load_acquire, replaced with xchg
  ZNMethodDataImmediateOops* volatile _immediate_oops;

  ZNMethodData(nmethod* nm);

public:
  static ZNMethodData* create(nmethod* nm);
  static void destroy(ZNMethodData* data);

  ZReentrantLock* lock();

  ZNMethodDataImmediateOops* immediate_oops() const;
  // Installs a new immediate oops array (or NULL) and returns the old one
  ZNMethodDataImmediateOops* swap_immediate_oops(const GrowableArray<oop*>& immediate_oops);
};
 118 
 119 ZNMethodData* ZNMethodData::create(nmethod* nm) {
 120   void* const method = NEW_C_HEAP_ARRAY(uint8_t, sizeof(ZNMethodData), mtGC);
 121   return ::new (method) ZNMethodData(nm);
 122 }
 123 
void ZNMethodData::destroy(ZNMethodData* data) {
  // Free the attached immediate oops first, then the data itself.
  // Both go through safe_delete(), which defers the free if a table
  // iteration is in progress.
  ZNMethodDataImmediateOops::destroy(data->immediate_oops());
  ZNMethodTable::safe_delete(data);
}
 128 
// Note: the nm parameter is currently unused by the constructor body
ZNMethodData::ZNMethodData(nmethod* nm) :
    _lock(),
    _immediate_oops(NULL) {}
 132 
// The per-nmethod lock, used to serialize operations on this nmethod
ZReentrantLock* ZNMethodData::lock() {
  return &_lock;
}
 136 
ZNMethodDataImmediateOops* ZNMethodData::immediate_oops() const {
  // Acquire-load pairs with the atomic xchg in swap_immediate_oops(),
  // so readers see a fully constructed immediate oops array
  return OrderAccess::load_acquire(&_immediate_oops);
}
 140 
// Installs a freshly allocated immediate oops array (or NULL when the
// list is empty) and returns the previously installed array, which the
// caller is responsible for destroying.
ZNMethodDataImmediateOops* ZNMethodData::swap_immediate_oops(const GrowableArray<oop*>& immediate_oops) {
  ZNMethodDataImmediateOops* const data_immediate_oops =
    immediate_oops.is_empty() ? NULL : ZNMethodDataImmediateOops::create(immediate_oops);
  return Atomic::xchg(data_immediate_oops, &_immediate_oops);
}
 146 
// Retrieve the ZNMethodData attached to an nmethod (NULL if not set)
static ZNMethodData* gc_data(const nmethod* nm) {
  return nm->gc_data<ZNMethodData>();
}
 150 
 151 static void set_gc_data(nmethod* nm, ZNMethodData* data) {
 152   return nm->set_gc_data<ZNMethodData>(data);
 153 }
 154 
ZNMethodTableEntry* ZNMethodTable::_table = NULL;         // Current hash table
size_t ZNMethodTable::_size = 0;                          // Current table capacity (power of two)
ZLock ZNMethodTable::_iter_lock;                          // Guards _iter_* state and deferred deletes
ZNMethodTableEntry* ZNMethodTable::_iter_table = NULL;    // Table snapshot being iterated (non-NULL while iterating)
size_t ZNMethodTable::_iter_table_size = 0;               // Capacity of the iteration snapshot
ZArray<void*> ZNMethodTable::_iter_deferred_deletes;      // Memory to free once iteration completes
size_t ZNMethodTable::_nregistered = 0;                   // Number of registered entries
size_t ZNMethodTable::_nunregistered = 0;                 // Number of unregistered (stale) entries still in the table
volatile size_t ZNMethodTable::_claimed = 0;              // Next unclaimed index for parallel iteration
 164 
// Frees GC-data memory, but defers the free while a table iteration is
// in progress, since concurrent iterators may still be accessing it.
// Deferred memory is freed by nmethod_entries_do_end().
void ZNMethodTable::safe_delete(void* data) {
  if (data == NULL) {
    return;
  }

  ZLocker<ZLock> locker(&_iter_lock);
  if (_iter_table != NULL) {
    // Iteration in progress, defer delete
    _iter_deferred_deletes.add(data);
  } else {
    // Iteration not in progress, delete now
    FREE_C_HEAP_ARRAY(uint8_t, data);
  }
}
 179 
// Scans the nmethod's oop relocations, (re)attaches GC data with the
// collected immediate oops, and builds the table entry recording
// whether the nmethod has immediate and/or non-immediate oops.
ZNMethodTableEntry ZNMethodTable::create_entry(nmethod* nm) {
  GrowableArray<oop*> immediate_oops;
  bool non_immediate_oops = false;

  // Find all oops relocations
  RelocIterator iter(nm);
  while (iter.next()) {
    if (iter.type() != relocInfo::oop_type) {
      // Not an oop
      continue;
    }

    oop_Relocation* r = iter.oop_reloc();

    if (!r->oop_is_immediate()) {
      // Non-immediate oop found
      non_immediate_oops = true;
      continue;
    }

    if (r->oop_value() != NULL) {
      // Non-NULL immediate oop found. NULL oops can safely be
      // ignored since the method will be re-registered if they
      // are later patched to be non-NULL.
      immediate_oops.push(r->oop_addr());
    }
  }

  // Attach GC data to nmethod, reusing any existing data
  ZNMethodData* data = gc_data(nm);
  if (data == NULL) {
    data = ZNMethodData::create(nm);
    set_gc_data(nm, data);
  }

  // Attach immediate oops in GC data, destroying the previous array
  ZNMethodDataImmediateOops* const old_data_immediate_oops = data->swap_immediate_oops(immediate_oops);
  ZNMethodDataImmediateOops::destroy(old_data_immediate_oops);

  // Create entry
  return ZNMethodTableEntry(nm, non_immediate_oops, !immediate_oops.is_empty());
}
 222 
 223 ZReentrantLock* ZNMethodTable::lock_for_nmethod(nmethod* nm) {
 224   ZNMethodData* const data = gc_data(nm);
 225   if (data == NULL) {
 226     return NULL;
 227   }
 228   return data->lock();
 229 }
 230 
 231 size_t ZNMethodTable::first_index(const nmethod* nm, size_t size) {
 232   assert(is_power_of_2(size), "Invalid size");
 233   const size_t mask = size - 1;
 234   const size_t hash = ZHash::address_to_uint32((uintptr_t)nm);
 235   return hash & mask;
 236 }
 237 
 238 size_t ZNMethodTable::next_index(size_t prev_index, size_t size) {
 239   assert(is_power_of_2(size), "Invalid size");
 240   const size_t mask = size - 1;
 241   return (prev_index + 1) & mask;
 242 }
 243 
// Inserts entry into table using open addressing with linear probing.
// Returns true if a new slot was claimed, false if an existing entry
// for the same nmethod was replaced. The probe loop terminates only on
// a free or matching slot, so the caller must ensure the table is
// never completely full (see rebuild_if_needed()).
bool ZNMethodTable::register_entry(ZNMethodTableEntry* table, size_t size, ZNMethodTableEntry entry) {
  const nmethod* const nm = entry.method();
  size_t index = first_index(nm, size);

  for (;;) {
    const ZNMethodTableEntry table_entry = table[index];

    if (!table_entry.registered() && !table_entry.unregistered()) {
      // Free slot: insert new entry
      table[index] = entry;
      return true;
    }

    if (table_entry.registered() && table_entry.method() == nm) {
      // Same nmethod already present: replace existing entry
      table[index] = entry;
      return false;
    }

    index = next_index(index, size);
  }
}
 266 
// Removes nm's entry from table, replacing it with an "unregistered"
// marker so probe chains of colliding entries remain intact. Stale
// markers are pruned when the table is rebuilt. Asserts (in debug
// builds) that the entry is present.
void ZNMethodTable::unregister_entry(ZNMethodTableEntry* table, size_t size, nmethod* nm) {
  if (size == 0) {
    // Table is empty
    return;
  }

  size_t index = first_index(nm, size);

  for (;;) {
    const ZNMethodTableEntry table_entry = table[index];
    assert(table_entry.registered() || table_entry.unregistered(), "Entry not found");

    if (table_entry.registered() && table_entry.method() == nm) {
      // Remove entry, leaving an unregistered marker in its place
      table[index] = ZNMethodTableEntry(true /* unregistered */);

      // Destroy GC data attached to the nmethod
      ZNMethodData::destroy(gc_data(nm));
      set_gc_data(nm, NULL);
      return;
    }

    index = next_index(index, size);
  }
}
 292 
// Rebuilds the table with the given capacity, transferring only
// registered entries (which also prunes unregistered markers). Holds
// _iter_lock so the rebuild does not race with iteration setup; the
// old table is kept alive if it is the current iteration snapshot and
// is then freed by nmethod_entries_do_end().
void ZNMethodTable::rebuild(size_t new_size) {
  ZLocker<ZLock> locker(&_iter_lock);
  assert(is_power_of_2(new_size), "Invalid size");

  log_debug(gc, nmethod)("Rebuilding NMethod Table: "
                         SIZE_FORMAT "->" SIZE_FORMAT " entries, "
                         SIZE_FORMAT "(%.0lf%%->%.0lf%%) registered, "
                         SIZE_FORMAT "(%.0lf%%->%.0lf%%) unregistered",
                         _size, new_size,
                         _nregistered, percent_of(_nregistered, _size), percent_of(_nregistered, new_size),
                         _nunregistered, percent_of(_nunregistered, _size), 0.0);

  // Allocate new table
  ZNMethodTableEntry* const new_table = new ZNMethodTableEntry[new_size];

  // Transfer all registered entries
  for (size_t i = 0; i < _size; i++) {
    const ZNMethodTableEntry entry = _table[i];
    if (entry.registered()) {
      register_entry(new_table, new_size, entry);
    }
  }

  if (_iter_table != _table) {
    // Delete old table (not referenced by an in-progress iteration)
    delete [] _table;
  }

  // Install new table
  _table = new_table;
  _size = new_size;
  _nunregistered = 0;
}
 326 
void ZNMethodTable::rebuild_if_needed() {
  // The hash table uses linear probing. To avoid wasting memory while
  // at the same time maintaining good hash collision behavior we want
  // to keep the table occupancy between 30% and 70%. The table always
  // grows/shrinks by doubling/halving its size. Pruning of unregistered
  // entries is done by rebuilding the table with or without resizing it.
  const size_t min_size = 1024;
  const size_t shrink_threshold = _size * 0.30;
  const size_t prune_threshold = _size * 0.65;
  const size_t grow_threshold = _size * 0.70;

  if (_size == 0) {
    // Initialize table
    rebuild(min_size);
  } else if (_nregistered < shrink_threshold && _size > min_size) {
    // Shrink table
    rebuild(_size / 2);
  } else if (_nregistered + _nunregistered > grow_threshold) {
    // Prune or grow table, depending on how much of the occupancy is
    // stale unregistered markers versus live entries
    if (_nregistered < prune_threshold) {
      // Prune table (rebuild at the same size to drop stale markers)
      rebuild(_size);
    } else {
      // Grow table
      rebuild(_size * 2);
    }
  }
}
 355 
// Trace-level logging of a registered nmethod; with gc+nmethod+oops
// tracing enabled, also dumps the oops table and immediate oops.
void ZNMethodTable::log_register(const nmethod* nm, ZNMethodTableEntry entry) {
  LogTarget(Trace, gc, nmethod) log;
  if (!log.is_enabled()) {
    return;
  }

  log.print("Register NMethod: %s.%s (" PTR_FORMAT "), "
            "Compiler: %s, Oops: %d, ImmediateOops: " SIZE_FORMAT ", NonImmediateOops: %s",
            nm->method()->method_holder()->external_name(),
            nm->method()->name()->as_C_string(),
            p2i(nm),
            nm->compiler_name(),
            nm->oops_count() - 1,
            entry.immediate_oops() ? gc_data(nm)->immediate_oops()->immediate_oops_count() : 0,
            entry.non_immediate_oops() ? "Yes" : "No");

  LogTarget(Trace, gc, nmethod, oops) log_oops;
  if (!log_oops.is_enabled()) {
    return;
  }

  // Print nmethod oops table
  oop* const begin = nm->oops_begin();
  oop* const end = nm->oops_end();
  for (oop* p = begin; p < end; p++) {
    log_oops.print("           Oop[" SIZE_FORMAT "] " PTR_FORMAT " (%s)",
                   (p - begin), p2i(*p), (*p)->klass()->external_name());
  }

  if (entry.immediate_oops()) {
    // Print nmethod immediate oops
    const ZNMethodDataImmediateOops* const nmi = gc_data(nm)->immediate_oops();
    if (nmi != NULL) {
      oop** const begin = nmi->immediate_oops_begin();
      oop** const end = nmi->immediate_oops_end();
      for (oop** p = begin; p < end; p++) {
        log_oops.print("  ImmediateOop[" SIZE_FORMAT "] " PTR_FORMAT " @ " PTR_FORMAT " (%s)",
                       (p - begin), p2i(**p), p2i(*p), (**p)->klass()->external_name());
      }
    }
  }
}
 398 
// Debug-level logging of an unregistered nmethod
void ZNMethodTable::log_unregister(const nmethod* nm) {
  LogTarget(Debug, gc, nmethod) log;
  if (!log.is_enabled()) {
    return;
  }

  log.print("Unregister NMethod: %s.%s (" PTR_FORMAT ")",
            nm->method()->method_holder()->external_name(),
            nm->method()->name()->as_C_string(),
            p2i(nm));
}
 410 
// Number of currently registered nmethods
size_t ZNMethodTable::registered_nmethods() {
  return _nregistered;
}
 414 
// Number of stale unregistered markers still occupying table slots
size_t ZNMethodTable::unregistered_nmethods() {
  return _nunregistered;
}
 418 
// Registers (or re-registers) an nmethod in the table. Caller must
// hold CodeCache_lock, which serializes all table mutations.
void ZNMethodTable::register_nmethod(nmethod* nm) {
  assert(CodeCache_lock->owned_by_self(), "Lock must be held");
  ResourceMark rm;

  // Grow/Shrink/Prune table if needed, guaranteeing a free slot below
  rebuild_if_needed();

  // Create entry (scans relocations, attaches GC data)
  const ZNMethodTableEntry entry = create_entry(nm);

  log_register(nm, entry);

  // Insert new entry
  if (register_entry(_table, _size, entry)) {
    // New entry registered. When register_entry() instead returns
    // false the nmethod was already in the table so we do not want
    // to increase number of registered entries in that case.
    _nregistered++;
  }

  // Disarm nmethod entry barrier
  disarm_nmethod(nm);
}
 442 
void ZNMethodTable::sweeper_wait_for_iteration() {
  // The sweeper must wait for any ongoing iteration to complete
  // before it can unregister an nmethod. Only applies to the code
  // cache sweeper thread; other callers return immediately.
  if (!Thread::current()->is_Code_cache_sweeper_thread()) {
    return;
  }

  while (_iter_table != NULL) {
    // Temporarily drop CodeCache_lock so the iterating threads can
    // finish (nmethod_entries_do_end() takes CodeCache_lock)
    MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    os::naked_short_sleep(1);
  }
}
 455 
// Unregisters an nmethod from the table and destroys its GC data.
// Caller must hold CodeCache_lock.
void ZNMethodTable::unregister_nmethod(nmethod* nm) {
  assert(CodeCache_lock->owned_by_self(), "Lock must be held");
  ResourceMark rm;

  // Sweeper threads must not unregister while an iteration is running
  sweeper_wait_for_iteration();

  log_unregister(nm);

  // Remove entry (leaves an unregistered marker in the slot)
  unregister_entry(_table, _size, nm);
  _nunregistered++;
  _nregistered--;
}
 469 
 470 void ZNMethodTable::disarm_nmethod(nmethod* nm) {
 471   BarrierSetNMethod* const bs = BarrierSet::barrier_set()->barrier_set_nmethod();
 472   if (bs != NULL) {
 473     bs->disarm(nm);
 474   }
 475 }
 476 
 477 void ZNMethodTable::nmethod_entries_do_begin() {
 478   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 479   ZLocker<ZLock> locker(&_iter_lock);
 480 
 481   // Prepare iteration
 482   _iter_table = _table;
 483   _iter_table_size = _size;
 484   _claimed = 0;
 485   assert(_iter_deferred_deletes.is_empty(), "Should be emtpy");
 486 }
 487 
// Finishes a table iteration: frees the snapshot if the table was
// rebuilt during iteration, then processes all deferred deletes that
// accumulated while the iteration was in progress.
void ZNMethodTable::nmethod_entries_do_end() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  ZLocker<ZLock> locker(&_iter_lock);

  // Finish iteration
  if (_iter_table != _table) {
    // The table was rebuilt while we iterated; the snapshot is now
    // the only reference to the old table, so free it here
    delete [] _iter_table;
  }
  _iter_table = NULL;
  assert(_claimed >= _iter_table_size, "Failed to claim all table entries");

  // Process deferred deletes
  ZArrayIterator<void*> iter(&_iter_deferred_deletes);
  for (void* data; iter.next(&data);) {
    FREE_C_HEAP_ARRAY(uint8_t, data);
  }
  _iter_deferred_deletes.clear();
}
 506 
 507 void ZNMethodTable::entry_oops_do(ZNMethodTableEntry entry, OopClosure* cl) {
 508   nmethod* const nm = entry.method();
 509 
 510   // Process oops table
 511   oop* const begin = nm->oops_begin();
 512   oop* const end = nm->oops_end();
 513   for (oop* p = begin; p < end; p++) {
 514     if (*p != Universe::non_oop_word()) {
 515       cl->do_oop(p);
 516     }
 517   }
 518 
 519   // Process immediate oops
 520   if (entry.immediate_oops()) {
 521     const ZNMethodDataImmediateOops* const nmi = gc_data(nm)->immediate_oops();
 522     if (nmi != NULL) {
 523       oop** const begin = nmi->immediate_oops_begin();
 524       oop** const end = nmi->immediate_oops_end();
 525       for (oop** p = begin; p < end; p++) {
 526         if (**p != Universe::non_oop_word()) {
 527           cl->do_oop(*p);
 528         }
 529       }
 530     }
 531   }
 532 
 533   // Process non-immediate oops
 534   if (entry.non_immediate_oops()) {
 535     nmethod* const nm = entry.method();
 536     nm->fix_oop_relocations();
 537   }
 538 }
 539 
 540 class ZNMethodTableEntryToOopsDo : public ZNMethodTableEntryClosure {
 541 private:
 542   OopClosure* _cl;
 543 
 544 public:
 545   ZNMethodTableEntryToOopsDo(OopClosure* cl) :
 546       _cl(cl) {}
 547 
 548   void do_nmethod_entry(ZNMethodTableEntry entry) {
 549     ZNMethodTable::entry_oops_do(entry, _cl);
 550   }
 551 };
 552 
// Applies cl to all oops of all registered nmethods in the current
// iteration snapshot (callers bracket this with nmethod_entries_do_begin/end)
void ZNMethodTable::oops_do(OopClosure* cl) {
  ZNMethodTableEntryToOopsDo entry_cl(cl);
  nmethod_entries_do(&entry_cl);
}
 557 
// Parallel iteration over the snapshot taken by nmethod_entries_do_begin().
// Multiple threads can call this concurrently; work is divided into
// partitions claimed atomically via _claimed.
void ZNMethodTable::nmethod_entries_do(ZNMethodTableEntryClosure* cl) {
  for (;;) {
    // Claim table partition. Each partition is currently sized to span
    // two cache lines. This number is just a guess, but seems to work well.
    const size_t partition_size = (ZCacheLineSize * 2) / sizeof(ZNMethodTableEntry);
    const size_t partition_start = MIN2(Atomic::add(partition_size, &_claimed) - partition_size, _iter_table_size);
    const size_t partition_end = MIN2(partition_start + partition_size, _iter_table_size);
    if (partition_start == partition_end) {
      // End of table
      break;
    }

    // Process table partition, visiting only registered entries
    for (size_t i = partition_start; i < partition_end; i++) {
      const ZNMethodTableEntry entry = _iter_table[i];
      if (entry.registered()) {
        cl->do_nmethod_entry(entry);
      }
    }
  }
}
 579 
// Closure for the unlink phase of class unloading: for each alive
// nmethod, either unlinks it (if unloading) or heals its oops, disarms
// its entry barrier, and cleans its caches. Sets a shared failure flag
// if IC cleaning runs out of transitional IC stubs, so the whole pass
// can be retried (see ZNMethodTable::unlink()).
class ZNMethodTableUnlinkClosure : public ZNMethodTableEntryClosure {
private:
  bool          _unloading_occurred;
  volatile bool _failed;  // Shared across worker threads, hence atomic access

  void set_failed() {
    Atomic::store(true, &_failed);
  }

public:
  ZNMethodTableUnlinkClosure(bool unloading_occurred) :
      _unloading_occurred(unloading_occurred),
      _failed(false) {}

  virtual void do_nmethod_entry(ZNMethodTableEntry entry) {
    if (failed()) {
      // A previous entry already failed; skip remaining work, the
      // whole pass will be retried
      return;
    }

    nmethod* const nm = entry.method();
    if (!nm->is_alive()) {
      return;
    }

    // Serialize against the nmethod entry barrier
    ZLocker<ZReentrantLock> locker(ZNMethodTable::lock_for_nmethod(nm));

    if (nm->is_unloading()) {
      // Unlinking of the dependencies must happen before the
      // handshake separating unlink and purge.
      nm->flush_dependencies(false /* delete_immediately */);

      // We don't need to take the lock when unlinking nmethods from
      // the Method, because it is only concurrently unlinked by
      // the entry barrier, which acquires the per nmethod lock.
      nm->unlink_from_method(false /* acquire_lock */);
      return;
    }

    // Heal oops and disarm
    ZNMethodOopClosure cl;
    ZNMethodTable::entry_oops_do(entry, &cl);
    ZNMethodTable::disarm_nmethod(nm);

    // Clear compiled ICs and exception caches
    if (!nm->unload_nmethod_caches(_unloading_occurred)) {
      set_failed();
    }
  }

  bool failed() const {
    return Atomic::load(&_failed);
  }
};
 633 
// Worker task running ZNMethodTableUnlinkClosure over the table in
// parallel. Construction/destruction bracket the table iteration.
class ZNMethodTableUnlinkTask : public ZTask {
private:
  ZNMethodTableUnlinkClosure _cl;
  ICRefillVerifier*          _verifier;  // Verifies IC stub refill requirements

public:
  ZNMethodTableUnlinkTask(bool unloading_occurred, ICRefillVerifier* verifier) :
      ZTask("ZNMethodTableUnlinkTask"),
      _cl(unloading_occurred),
      _verifier(verifier) {
    ZNMethodTable::nmethod_entries_do_begin();
  }

  ~ZNMethodTableUnlinkTask() {
    ZNMethodTable::nmethod_entries_do_end();
  }

  virtual void work() {
    ICRefillVerifierMark mark(_verifier);
    ZNMethodTable::nmethod_entries_do(&_cl);
  }

  // False if the pass ran out of transitional IC stubs and must be retried
  bool success() const {
    return !_cl.failed();
  }
};
 660 
// Runs the unlink phase, retrying until it completes without running
// out of transitional IC stubs.
void ZNMethodTable::unlink(ZWorkers* workers, bool unloading_occurred) {
  for (;;) {
    ICRefillVerifier verifier;

    {
      // Task destructor must run before refilling IC stubs below,
      // since it ends the table iteration
      ZNMethodTableUnlinkTask task(unloading_occurred, &verifier);
      workers->run_concurrent(&task);
      if (task.success()) {
        return;
      }
    }

    // Cleaning failed because we ran out of transitional IC stubs,
    // so we have to refill and try again. Refilling requires taking
    // a safepoint, so we temporarily leave the suspendible thread set.
    SuspendibleThreadSetLeaver sts;
    InlineCacheBuffer::refill_ic_stubs();
  }
}
 680 
// Closure for the purge phase: makes every alive-but-unloading
// nmethod unloaded.
class ZNMethodTablePurgeClosure : public ZNMethodTableEntryClosure {
public:
  virtual void do_nmethod_entry(ZNMethodTableEntry entry) {
    nmethod* const nm = entry.method();
    if (nm->is_alive() && nm->is_unloading()) {
      nm->make_unloaded();
    }
  }
};
 690 
// Worker task running ZNMethodTablePurgeClosure over the table in
// parallel. Construction/destruction bracket the table iteration.
class ZNMethodTablePurgeTask : public ZTask {
private:
  ZNMethodTablePurgeClosure _cl;

public:
  ZNMethodTablePurgeTask() :
      ZTask("ZNMethodTablePurgeTask"),
      _cl() {
    ZNMethodTable::nmethod_entries_do_begin();
  }

  ~ZNMethodTablePurgeTask() {
    ZNMethodTable::nmethod_entries_do_end();
  }

  virtual void work() {
    ZNMethodTable::nmethod_entries_do(&_cl);
  }
};
 710 
// Runs the purge phase concurrently on the given workers
void ZNMethodTable::purge(ZWorkers* workers) {
  ZNMethodTablePurgeTask task;
  workers->run_concurrent(&task);
}