
src/hotspot/share/gc/z/zNMethodTable.cpp

Old version:

  25 #include "code/relocInfo.hpp"
  26 #include "code/nmethod.hpp"
  27 #include "code/icBuffer.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/barrierSetNMethod.hpp"
  30 #include "gc/z/zArray.inline.hpp"
  31 #include "gc/z/zGlobals.hpp"
  32 #include "gc/z/zHash.inline.hpp"
  33 #include "gc/z/zLock.inline.hpp"
  34 #include "gc/z/zNMethodTable.hpp"
  35 #include "gc/z/zOopClosures.inline.hpp"
  36 #include "gc/z/zTask.hpp"
  37 #include "gc/z/zWorkers.hpp"
  38 #include "logging/log.hpp"
  39 #include "memory/allocation.inline.hpp"
  40 #include "memory/resourceArea.hpp"
  41 #include "runtime/atomic.hpp"
  42 #include "runtime/orderAccess.hpp"
  43 #include "utilities/debug.hpp"
  44 
  45 class ZNMethodDataImmediateOops {
  46 private:
  47   const size_t _nimmediate_oops;
  48 
  49   static size_t header_size();
  50 
  51   ZNMethodDataImmediateOops(const GrowableArray<oop*>& immediate_oops);
  52 
  53 public:
  54   static ZNMethodDataImmediateOops* create(const GrowableArray<oop*>& immediate_oops);
  55   static void destroy(ZNMethodDataImmediateOops* data_immediate_oops);
  56 
  57   size_t immediate_oops_count() const;
  58   oop** immediate_oops_begin() const;
  59   oop** immediate_oops_end() const;
  60 };
  61 
  62 size_t ZNMethodDataImmediateOops::header_size() {
  63   const size_t size = sizeof(ZNMethodDataImmediateOops);
  64   assert(is_aligned(size, sizeof(oop*)), "Header misaligned");
  65   return size;
  66 }
  67 
  68 ZNMethodDataImmediateOops* ZNMethodDataImmediateOops::create(const GrowableArray<oop*>& immediate_oops) {
  69   // Allocate memory for the ZNMethodDataImmediateOops object
  70   // plus the immediate oop* array that follows right after.
  71   const size_t size = ZNMethodDataImmediateOops::header_size() + (sizeof(oop*) * immediate_oops.length());
  72   void* const data_immediate_oops = NEW_C_HEAP_ARRAY(uint8_t, size, mtGC);
  73   return ::new (data_immediate_oops) ZNMethodDataImmediateOops(immediate_oops);
  74 }
  75 
  76 void ZNMethodDataImmediateOops::destroy(ZNMethodDataImmediateOops* data_immediate_oops) {
  77   ZNMethodTable::safe_delete(data_immediate_oops);
  78 }
  79 
  80 ZNMethodDataImmediateOops::ZNMethodDataImmediateOops(const GrowableArray<oop*>& immediate_oops) :
  81     _nimmediate_oops(immediate_oops.length()) {
  82   // Save all immediate oops
  83   for (size_t i = 0; i < _nimmediate_oops; i++) {
  84     immediate_oops_begin()[i] = immediate_oops.at(i);
  85   }
  86 }
  87 
  88 size_t ZNMethodDataImmediateOops::immediate_oops_count() const {
  89   return _nimmediate_oops;
  90 }
  91 
  92 oop** ZNMethodDataImmediateOops::immediate_oops_begin() const {
  93   // The immediate oop* array starts immediately after this object
  94   return (oop**)((uintptr_t)this + header_size());
  95 }
  96 
  97 oop** ZNMethodDataImmediateOops::immediate_oops_end() const {
  98   return immediate_oops_begin() + immediate_oops_count();
  99 }
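
The create()/header_size()/immediate_oops_begin() trio above implements a single-allocation "header plus trailing array" layout: one C-heap block holds the fixed-size object followed directly by its variable-length oop* array. Below is a minimal standalone sketch of the same trick, with plain malloc standing in for NEW_C_HEAP_ARRAY and illustrative names throughout:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <new>

// One allocation holds the fixed-size header followed by 'count' slots.
class TrailingArray {
private:
  const size_t _count;

  static size_t header_size() {
    const size_t size = sizeof(TrailingArray);
    assert(size % sizeof(void*) == 0 && "Header misaligned");
    return size;
  }

  explicit TrailingArray(size_t count) : _count(count) {}

public:
  static TrailingArray* create(size_t count) {
    // Single allocation: header plus the slot array that follows right after
    void* const mem = malloc(header_size() + sizeof(void*) * count);
    return ::new (mem) TrailingArray(count);
  }

  static void destroy(TrailingArray* array) {
    free(array);  // Trivial destructor, so freeing the raw block is enough
  }

  void** begin() const {
    // The slot array starts immediately after this object
    return (void**)((uintptr_t)this + header_size());
  }

  void** end() const {
    return begin() + _count;
  }
};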
 100 
 101 class ZNMethodData {
 102 private:
 103   ZReentrantLock                      _lock;
 104   ZNMethodDataImmediateOops* volatile _immediate_oops;
 105 
 106   ZNMethodData(nmethod* nm);
 107 
 108 public:
 109   static ZNMethodData* create(nmethod* nm);
 110   static void destroy(ZNMethodData* data);
 111 
 112   ZReentrantLock* lock();
 113 
 114   ZNMethodDataImmediateOops* immediate_oops() const;
 115   ZNMethodDataImmediateOops* swap_immediate_oops(const GrowableArray<oop*>& immediate_oops);
 116 };
 117 
 118 ZNMethodData* ZNMethodData::create(nmethod* nm) {
 119   void* const method = NEW_C_HEAP_ARRAY(uint8_t, sizeof(ZNMethodData), mtGC);
 120   return ::new (method) ZNMethodData(nm);
 121 }
 122 
 123 void ZNMethodData::destroy(ZNMethodData* data) {
 124   ZNMethodDataImmediateOops::destroy(data->immediate_oops());
 125   ZNMethodTable::safe_delete(data);
 126 }
 127 
 128 ZNMethodData::ZNMethodData(nmethod* nm) :
 129     _lock(),
 130     _immediate_oops(NULL) {}
 131 
 132 ZReentrantLock* ZNMethodData::lock() {
 133   return &_lock;
 134 }
 135 
 136 ZNMethodDataImmediateOops* ZNMethodData::immediate_oops() const {
 137   return OrderAccess::load_acquire(&_immediate_oops);
 138 }
 139 
 140 ZNMethodDataImmediateOops* ZNMethodData::swap_immediate_oops(const GrowableArray<oop*>& immediate_oops) {
 141   ZNMethodDataImmediateOops* const data_immediate_oops =
 142     immediate_oops.is_empty() ? NULL : ZNMethodDataImmediateOops::create(immediate_oops);
 143   return Atomic::xchg(data_immediate_oops, &_immediate_oops);
 144 }
 145 
 146 static ZNMethodData* gc_data(const nmethod* nm) {
 147   return nm->gc_data<ZNMethodData>();
 148 }
 149 
 150 static void set_gc_data(nmethod* nm, ZNMethodData* data) {
 151   return nm->set_gc_data<ZNMethodData>(data);
 152 }
 153 
 154 ZNMethodTableEntry* ZNMethodTable::_table = NULL;
 155 size_t ZNMethodTable::_size = 0;
 156 ZNMethodTableEntry* ZNMethodTable::_iter_table = NULL;
 157 size_t ZNMethodTable::_iter_table_size = 0;
 158 ZArray<void*> ZNMethodTable::_iter_deferred_deletes;
 159 size_t ZNMethodTable::_nregistered = 0;
 160 size_t ZNMethodTable::_nunregistered = 0;
 161 volatile size_t ZNMethodTable::_claimed = 0;
 162 
 163 void ZNMethodTable::safe_delete(void* data) {
 164   assert(CodeCache_lock->owned_by_self(), "Lock must be held");
 165 
 166   if (data == NULL) {
 167     return;
 168   }
 169 
 170   if (_iter_table != NULL) {
 171     // Iteration in progress, defer delete
 172     _iter_deferred_deletes.add(data);
 173   } else {
 174     // Iteration not in progress, delete now
 175     FREE_C_HEAP_ARRAY(uint8_t, data);
 176   }
 177 }
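
The deferral above protects concurrent readers: a worker iterating over _iter_table may still dereference a ZNMethodData or its immediate-oops array, so freeing it mid-iteration would be a use-after-free. A single-threaded sketch of the idea (locking elided; in the real code CodeCache_lock serializes these paths, and the names below are illustrative):

#include <cstdlib>
#include <vector>

static std::vector<void*> deferred_deletes;  // Stands in for _iter_deferred_deletes
static bool iteration_in_progress = false;   // Stands in for _iter_table != NULL

static void safe_delete(void* data) {
  if (data == nullptr) {
    return;
  }
  if (iteration_in_progress) {
    deferred_deletes.push_back(data);  // A concurrent iterator may still read it
  } else {
    free(data);                        // No iterator can hold a reference, free now
  }
}

static void iteration_done() {
  iteration_in_progress = false;
  for (void* data : deferred_deletes) {
    free(data);  // Safe: no iteration can observe these objects anymore
  }
  deferred_deletes.clear();
}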
 178 
 179 ZNMethodTableEntry ZNMethodTable::create_entry(nmethod* nm) {
 180   GrowableArray<oop*> immediate_oops;
 181   bool non_immediate_oops = false;
 182 
 183   // Find all oops relocations
 184   RelocIterator iter(nm);
 185   while (iter.next()) {
 186     if (iter.type() != relocInfo::oop_type) {
 187       // Not an oop
 188       continue;
 189     }
 190 
 191     oop_Relocation* r = iter.oop_reloc();
 192 
 193     if (!r->oop_is_immediate()) {
 194       // Non-immediate oop found
 195       non_immediate_oops = true;
 196       continue;
 197     }
 198 
 199     if (r->oop_value() != NULL) {
 200       // Non-NULL immediate oop found. NULL oops can safely be
 201       // ignored since the method will be re-registered if they
 202       // are later patched to be non-NULL.
 203       immediate_oops.push(r->oop_addr());
 204     }
 205   }
 206 
 207   // Attach GC data to nmethod
 208   ZNMethodData* data = gc_data(nm);
 209   if (data == NULL) {
 210     data = ZNMethodData::create(nm);
 211     set_gc_data(nm, data);
 212   }
 213 
 214   // Attach immediate oops in GC data
 215   ZNMethodDataImmediateOops* const old_data_immediate_oops = data->swap_immediate_oops(immediate_oops);
 216   ZNMethodDataImmediateOops::destroy(old_data_immediate_oops);
 217 
 218   // Create entry
 219   return ZNMethodTableEntry(nm, non_immediate_oops, !immediate_oops.is_empty());
 220 }
 221 
 222 ZReentrantLock* ZNMethodTable::lock_for_nmethod(nmethod* nm) {
 223   ZNMethodData* const data = gc_data(nm);
 224   if (data == NULL) {
 225     return NULL;
 226   }
 227   return data->lock();
 228 }
 229 
 230 size_t ZNMethodTable::first_index(const nmethod* nm, size_t size) {
 231   assert(is_power_of_2(size), "Invalid size");
 232   const size_t mask = size - 1;
 233   const size_t hash = ZHash::address_to_uint32((uintptr_t)nm);
 234   return hash & mask;
 235 }
 236 
 237 size_t ZNMethodTable::next_index(size_t prev_index, size_t size) {
 238   assert(is_power_of_2(size), "Invalid size");
 239   const size_t mask = size - 1;
 240   return (prev_index + 1) & mask;
 241 }
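
first_index() and next_index() together define an open-addressing probe sequence: hash the nmethod address into a power-of-two table, then advance one slot at a time with wraparound. A standalone sketch, where the multiplicative hash is only a stand-in for ZHash::address_to_uint32:

#include <cstddef>
#include <cstdint>

static size_t probe_first(uintptr_t key, size_t size) {
  const size_t mask = size - 1;  // Valid because size is a power of two
  const size_t hash = (size_t)((key * 0x9E3779B97F4A7C15ull) >> 32);
  return hash & mask;
}

static size_t probe_next(size_t prev_index, size_t size) {
  const size_t mask = size - 1;
  return (prev_index + 1) & mask;  // Linear step, wrapping at the table end
}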
 242 
 243 bool ZNMethodTable::register_entry(ZNMethodTableEntry* table, size_t size, ZNMethodTableEntry entry) {
 244   const nmethod* const nm = entry.method();
 245   size_t index = first_index(nm, size);
 246 
 247   for (;;) {
 248     const ZNMethodTableEntry table_entry = table[index];
 249 
 250     if (!table_entry.registered() && !table_entry.unregistered()) {
 251       // Insert new entry
 252       table[index] = entry;
 253       return true;
 254     }
 255 
 256     if (table_entry.registered() && table_entry.method() == nm) {
 257       // Replace existing entry
 258       table[index] = entry;
 259       return false;
 260     }
 261 
 262     index = next_index(index, size);
 263   }
 264 }
 265 
 266 void ZNMethodTable::unregister_entry(ZNMethodTableEntry* table, size_t size, nmethod* nm) {
 267   if (size == 0) {
 268     // Table is empty
 269     return;
 270   }
 271 
 272   size_t index = first_index(nm, size);
 273 
 274   for (;;) {
 275     const ZNMethodTableEntry table_entry = table[index];
 276     assert(table_entry.registered() || table_entry.unregistered(), "Entry not found");
 277 
 278     if (table_entry.registered() && table_entry.method() == nm) {
 279       // Remove entry
 280       table[index] = ZNMethodTableEntry(true /* unregistered */);
 281 
 282       // Destroy GC data
 283       ZNMethodData::destroy(gc_data(nm));
 284       set_gc_data(nm, NULL);
 285       return;
 286     }
 287 
 288     index = next_index(index, size);
 289   }
 290 }
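
Note that unregister_entry() above stores an "unregistered" tombstone rather than clearing the slot: with linear probing, emptying a slot in the middle of a probe chain would strand entries inserted past it, so lookups must skip tombstones and stop only at genuinely empty slots. A sketch with illustrative types:

#include <cstddef>

enum SlotState { SLOT_EMPTY, SLOT_OCCUPIED, SLOT_TOMBSTONE };

struct Slot {
  SlotState   state;
  const void* key;
};

// Assumes every probe chain ends in an empty slot, which rebuild_if_needed()
// guarantees by keeping occupancy (registered + tombstones) below 70%.
static const Slot* find(const Slot* table, size_t size, const void* key, size_t first_index) {
  const size_t mask = size - 1;
  for (size_t i = first_index; ; i = (i + 1) & mask) {
    const Slot& slot = table[i];
    if (slot.state == SLOT_EMPTY) {
      return nullptr;  // End of the probe chain: key is not in the table
    }
    if (slot.state == SLOT_OCCUPIED && slot.key == key) {
      return &slot;    // Found a live entry for the key
    }
    // Tombstone or other key: keep probing, a match may lie further along
  }
}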
 291 
 292 void ZNMethodTable::rebuild(size_t new_size) {
 293   assert(CodeCache_lock->owned_by_self(), "Lock must be held");
 294 
 295   assert(is_power_of_2(new_size), "Invalid size");
 296 
 297   log_debug(gc, nmethod)("Rebuilding NMethod Table: "
 298                          SIZE_FORMAT "->" SIZE_FORMAT " entries, "
 299                          SIZE_FORMAT "(%.0lf%%->%.0lf%%) registered, "
 300                          SIZE_FORMAT "(%.0lf%%->%.0lf%%) unregistered",
 301                          _size, new_size,
 302                          _nregistered, percent_of(_nregistered, _size), percent_of(_nregistered, new_size),
 303                          _nunregistered, percent_of(_nunregistered, _size), 0.0);
 304 
 305   // Allocate new table
 306   ZNMethodTableEntry* const new_table = new ZNMethodTableEntry[new_size];
 307 
 308   // Transfer all registered entries
 309   for (size_t i = 0; i < _size; i++) {
 310     const ZNMethodTableEntry entry = _table[i];
 311     if (entry.registered()) {
 312       register_entry(new_table, new_size, entry);
 313     }
 314   }
 315 
 316   if (_iter_table != _table) {
 317     // Delete old table
 318     delete [] _table;
 319   }
 320 
 321   // Install new table
 322   _table = new_table;
 323   _size = new_size;
 324   _nunregistered = 0;
 325 }
 326 
 327 void ZNMethodTable::rebuild_if_needed() {
 328   // The hash table uses linear probing. To avoid wasting memory while
 329   // at the same time maintaining good hash collision behavior we want
 330   // to keep the table occupancy between 30% and 70%. The table always
 331   // grows/shrinks by doubling/halving its size. Pruning of unregistered
 332   // entries is done by rebuilding the table with or without resizing it.
 333   const size_t min_size = 1024;
 334   const size_t shrink_threshold = _size * 0.30;
 335   const size_t prune_threshold = _size * 0.65;
 336   const size_t grow_threshold = _size * 0.70;
 337 
 338   if (_size == 0) {
 339     // Initialize table
 340     rebuild(min_size);
 341   } else if (_nregistered < shrink_threshold && _size > min_size) {
 342     // Shrink table
 343     rebuild(_size / 2);
 344   } else if (_nregistered + _nunregistered > grow_threshold) {
 345     // Prune or grow table
 346     if (_nregistered < prune_threshold) {
 347       // Prune table
 348       rebuild(_size);
 349     } else {
 350       // Grow table
 351       rebuild(_size * 2);
 352     }
 353   }
 354 }
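
A worked example of the thresholds, assuming the min_size/shrink/prune constants shown above: with _size == 2048, the table shrinks when fewer than 614 nmethods are registered, and once registered plus unregistered entries exceed 1433 it is either pruned in place (registered count below 1331) or doubled.

#include <cstddef>
#include <cstdio>

int main() {
  const size_t size = 2048;
  printf("shrink below        %zu registered\n", (size_t)(size * 0.30));  // 614
  printf("prune/grow above    %zu occupied\n",   (size_t)(size * 0.70));  // 1433
  printf("grow (not prune) at %zu registered\n", (size_t)(size * 0.65));  // 1331
  return 0;
}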
 355 
 356 void ZNMethodTable::log_register(const nmethod* nm, ZNMethodTableEntry entry) {
 357   LogTarget(Trace, gc, nmethod) log;
 358   if (!log.is_enabled()) {
 359     return;
 360   }
 361 
 362   log.print("Register NMethod: %s.%s (" PTR_FORMAT "), "
 363             "Compiler: %s, Oops: %d, ImmediateOops: " SIZE_FORMAT ", NonImmediateOops: %s",
 364             nm->method()->method_holder()->external_name(),
 365             nm->method()->name()->as_C_string(),
 366             p2i(nm),
 367             nm->compiler_name(),
 368             nm->oops_count() - 1,
 369             entry.immediate_oops() ? gc_data(nm)->immediate_oops()->immediate_oops_count() : 0,
 370             entry.non_immediate_oops() ? "Yes" : "No");
 371 
 372   LogTarget(Trace, gc, nmethod, oops) log_oops;
 373   if (!log_oops.is_enabled()) {
 374     return;
 375   }
 376 
 377   // Print nmethod oops table
 378   oop* const begin = nm->oops_begin();
 379   oop* const end = nm->oops_end();
 380   for (oop* p = begin; p < end; p++) {
 381     log_oops.print("           Oop[" SIZE_FORMAT "] " PTR_FORMAT " (%s)",
 382                    (p - begin), p2i(*p), (*p)->klass()->external_name());
 383   }
 384 
 385   if (entry.immediate_oops()) {
 386     // Print nmethod immediate oops
 387     const ZNMethodDataImmediateOops* const nmi = gc_data(nm)->immediate_oops();
 388     if (nmi != NULL) {
 389       oop** const begin = nmi->immediate_oops_begin();
 390       oop** const end = nmi->immediate_oops_end();
 391       for (oop** p = begin; p < end; p++) {
 392         log_oops.print("  ImmediateOop[" SIZE_FORMAT "] " PTR_FORMAT " @ " PTR_FORMAT " (%s)",
 393                        (p - begin), p2i(**p), p2i(*p), (**p)->klass()->external_name());
 394       }
 395     }
 396   }
 397 }
 398 
 399 void ZNMethodTable::log_unregister(const nmethod* nm) {
 400   LogTarget(Debug, gc, nmethod) log;
 401   if (!log.is_enabled()) {
 402     return;
 403   }
 404 
 405   log.print("Unregister NMethod: %s.%s (" PTR_FORMAT ")",
 406             nm->method()->method_holder()->external_name(),
 407             nm->method()->name()->as_C_string(),
 408             p2i(nm));
 409 }
 410 
 411 size_t ZNMethodTable::registered_nmethods() {
 412   return _nregistered;
 413 }
 414 
 415 size_t ZNMethodTable::unregistered_nmethods() {
 416   return _nunregistered;
 417 }
 418 
 419 void ZNMethodTable::register_nmethod(nmethod* nm) {
 420   assert(CodeCache_lock->owned_by_self(), "Lock must be held");
 421   ResourceMark rm;
 422 
 423   // Grow/Shrink/Prune table if needed
 424   rebuild_if_needed();
 425 
 426   // Create entry
 427   const ZNMethodTableEntry entry = create_entry(nm);
 428 
 429   log_register(nm, entry);
 430 
 431   // Insert new entry
 432   if (register_entry(_table, _size, entry)) {
 433     // New entry registered. When register_entry() instead returns
 434     // false, the nmethod was already in the table, so we do not want
 435     // to increase the number of registered entries in that case.
 436     _nregistered++;
 437   }
 438 
 439   // Disarm nmethod entry barrier
 440   disarm_nmethod(nm);
 441 }
 442 
 443 void ZNMethodTable::wait_until_iteration_done() {
 444   assert(CodeCache_lock->owned_by_self(), "Lock must be held");
 445 
 446   while (_iter_table != NULL) {
 447     CodeCache_lock->wait(Monitor::_no_safepoint_check_flag);
 448   }
 449 }
 450 
 451 void ZNMethodTable::unregister_nmethod(nmethod* nm) {
 452   assert(CodeCache_lock->owned_by_self(), "Lock must be held");
 453 
 454   if (Thread::current()->is_Code_cache_sweeper_thread()) {
 455     // The sweeper must wait for any ongoing iteration to complete
 456     // before it can unregister an nmethod.
 457     ZNMethodTable::wait_until_iteration_done();
 458   }
 459 
 460   ResourceMark rm;
 461 
 462   log_unregister(nm);
 463 
 464   // Remove entry
 465   unregister_entry(_table, _size, nm);
 466   _nunregistered++;
 467   _nregistered--;
 468 }
 469 
 470 void ZNMethodTable::disarm_nmethod(nmethod* nm) {
 471   BarrierSetNMethod* const bs = BarrierSet::barrier_set()->barrier_set_nmethod();
 472   if (bs != NULL) {
 473     bs->disarm(nm);
 474   }
 475 }
 476 
 477 void ZNMethodTable::nmethod_entries_do_begin() {
 478   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 479 
 480   // Prepare iteration
 481   _iter_table = _table;
 482   _iter_table_size = _size;
 483   _claimed = 0;
 484   assert(_iter_deferred_deletes.is_empty(), "Should be empty");
 485 }
 486 
 487 void ZNMethodTable::nmethod_entries_do_end() {
 488   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 489 
 490   // Finish iteration
 491   if (_iter_table != _table) {
 492     delete [] _iter_table;
 493   }
 494   _iter_table = NULL;
 495 
 496   assert(_claimed >= _iter_table_size, "Failed to claim all table entries");
 497 
 498   // Process deferred deletes
 499   ZArrayIterator<void*> iter(&_iter_deferred_deletes);
 500   for (void* data; iter.next(&data);) {
 501     FREE_C_HEAP_ARRAY(uint8_t, data);
 502   }
 503   _iter_deferred_deletes.clear();
 504 
 505   // Notify iteration done
 506   CodeCache_lock->notify_all();
 507 }
 508 
 509 void ZNMethodTable::entry_oops_do(ZNMethodTableEntry entry, OopClosure* cl) {
 510   nmethod* const nm = entry.method();
 511 
 512   // Process oops table
 513   oop* const begin = nm->oops_begin();
 514   oop* const end = nm->oops_end();
 515   for (oop* p = begin; p < end; p++) {
 516     if (*p != Universe::non_oop_word()) {
 517       cl->do_oop(p);
 518     }
 519   }
 520 


 521   // Process immediate oops
 522   if (entry.immediate_oops()) {
 523     const ZNMethodDataImmediateOops* const nmi = gc_data(nm)->immediate_oops();
 524     if (nmi != NULL) {
 525       oop** const begin = nmi->immediate_oops_begin();
 526       oop** const end = nmi->immediate_oops_end();
 527       for (oop** p = begin; p < end; p++) {
 528         if (**p != Universe::non_oop_word()) {
 529           cl->do_oop(*p);
 530         }
 531       }
 532     }
 533   }
 534 
 535   // Process non-immediate oops
 536   if (entry.non_immediate_oops()) {
 537     nmethod* const nm = entry.method();
 538     nm->fix_oop_relocations();
 539   }
 540 }
 541 
 542 class ZNMethodTableEntryToOopsDo : public ZNMethodTableEntryClosure {
 543 private:
 544   OopClosure* _cl;
 545 
 546 public:
 547   ZNMethodTableEntryToOopsDo(OopClosure* cl) :
 548       _cl(cl) {}
 549 
 550   void do_nmethod_entry(ZNMethodTableEntry entry) {
 551     ZNMethodTable::entry_oops_do(entry, _cl);
 552   }
 553 };
 554 
 555 void ZNMethodTable::oops_do(OopClosure* cl) {
 556   ZNMethodTableEntryToOopsDo entry_cl(cl);
 557   nmethod_entries_do(&entry_cl);
 558 }
 559 
 560 void ZNMethodTable::nmethod_entries_do(ZNMethodTableEntryClosure* cl) {
 561   for (;;) {
 562     // Claim table partition. Each partition is currently sized to span
 563     // two cache lines. This number is just a guess, but seems to work well.
 564     const size_t partition_size = (ZCacheLineSize * 2) / sizeof(ZNMethodTableEntry);
 565     const size_t partition_start = MIN2(Atomic::add(partition_size, &_claimed) - partition_size, _iter_table_size);
 566     const size_t partition_end = MIN2(partition_start + partition_size, _iter_table_size);
 567     if (partition_start == partition_end) {
 568       // End of table
 569       break;
 570     }
 571 
 572     // Process table partition
 573     for (size_t i = partition_start; i < partition_end; i++) {
 574       const ZNMethodTableEntry entry = _iter_table[i];
 575       if (entry.registered()) {
 576         cl->do_nmethod_entry(entry);
 577       }
 578     }
 579   }
 580 }
 581 
 582 class ZNMethodTableUnlinkClosure : public ZNMethodTableEntryClosure {
 583 private:
 584   bool          _unloading_occurred;
 585   volatile bool _failed;
 586 
 587   void set_failed() {
 588     Atomic::store(true, &_failed);
 589   }
 590 
 591 public:
 592   ZNMethodTableUnlinkClosure(bool unloading_occurred) :
 593       _unloading_occurred(unloading_occurred),
 594       _failed(false) {}
 595 
 596   virtual void do_nmethod_entry(ZNMethodTableEntry entry) {
 597     if (failed()) {
 598       return;
 599     }
 600 
 601     nmethod* const nm = entry.method();
 602     if (!nm->is_alive()) {
 603       return;
 604     }
 605 
 606     ZLocker<ZReentrantLock> locker(ZNMethodTable::lock_for_nmethod(nm));
 607 
 608     if (nm->is_unloading()) {
 609       // Unlinking of the dependencies must happen before the
 610       // handshake separating unlink and purge.
 611       nm->flush_dependencies(false /* delete_immediately */);
 612 
 613       // We don't need to take the lock when unlinking nmethods from
 614       // the Method, because it is only concurrently unlinked by
 615       // the entry barrier, which acquires the per nmethod lock.
 616       nm->unlink_from_method(false /* acquire_lock */);
 617       return;
 618     }
 619 
 620     // Heal oops and disarm
 621     ZNMethodOopClosure cl;
 622     ZNMethodTable::entry_oops_do(entry, &cl);
 623     ZNMethodTable::disarm_nmethod(nm);
 624 
 625     // Clear compiled ICs and exception caches
 626     if (!nm->unload_nmethod_caches(_unloading_occurred)) {
 627       set_failed();
 628     }
 629   }
 630 
 631   bool failed() const {
 632     return Atomic::load(&_failed);
 633   }
 634 };
 635 
 636 class ZNMethodTableUnlinkTask : public ZTask {
 637 private:
 638   ZNMethodTableUnlinkClosure _cl;
 639   ICRefillVerifier*          _verifier;
 640 
 641 public:
 642   ZNMethodTableUnlinkTask(bool unloading_occurred, ICRefillVerifier* verifier) :
 643       ZTask("ZNMethodTableUnlinkTask"),
 644       _cl(unloading_occurred),
 645       _verifier(verifier) {
 646     ZNMethodTable::nmethod_entries_do_begin();
 647   }
 648 
 649   ~ZNMethodTableUnlinkTask() {
 650     ZNMethodTable::nmethod_entries_do_end();
 651   }
 652 
 653   virtual void work() {
 654     ICRefillVerifierMark mark(_verifier);
 655     ZNMethodTable::nmethod_entries_do(&_cl);
 656   }
 657 
 658   bool success() const {
 659     return !_cl.failed();
 660   }
 661 };
 662 
 663 void ZNMethodTable::unlink(ZWorkers* workers, bool unloading_occurred) {
 664   for (;;) {
 665     ICRefillVerifier verifier;
 666 
 667     {
 668       ZNMethodTableUnlinkTask task(unloading_occurred, &verifier);
 669       workers->run_concurrent(&task);
 670       if (task.success()) {
 671         return;
 672       }
 673     }
 674 
 675     // Cleaning failed because we ran out of transitional IC stubs,
 676     // so we have to refill and try again. Refilling requires taking
 677     // a safepoint, so we temporarily leave the suspendible thread set.
 678     SuspendibleThreadSetLeaver sts;
 679     InlineCacheBuffer::refill_ic_stubs();
 680   }
 681 }
 682 
 683 class ZNMethodTablePurgeClosure : public ZNMethodTableEntryClosure {
 684 public:
 685   virtual void do_nmethod_entry(ZNMethodTableEntry entry) {
 686     nmethod* const nm = entry.method();
 687     if (nm->is_alive() && nm->is_unloading()) {
 688       nm->make_unloaded();
 689     }
 690   }
 691 };
 692 
 693 class ZNMethodTablePurgeTask : public ZTask {
 694 private:
 695   ZNMethodTablePurgeClosure _cl;
 696 
 697 public:
 698   ZNMethodTablePurgeTask() :
 699       ZTask("ZNMethodTablePurgeTask"),
 700       _cl() {
 701     ZNMethodTable::nmethod_entries_do_begin();
 702   }
 703 
 704   ~ZNMethodTablePurgeTask() {
 705     ZNMethodTable::nmethod_entries_do_end();
 706   }
 707 
 708   virtual void work() {
 709     ZNMethodTable::nmethod_entries_do(&_cl);
 710   }
 711 };
 712 
 713 void ZNMethodTable::purge(ZWorkers* workers) {
 714   ZNMethodTablePurgeTask task;
 715   workers->run_concurrent(&task);
 716 }

New version:

  25 #include "code/relocInfo.hpp"
  26 #include "code/nmethod.hpp"
  27 #include "code/icBuffer.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/barrierSetNMethod.hpp"
  30 #include "gc/z/zArray.inline.hpp"
  31 #include "gc/z/zGlobals.hpp"
  32 #include "gc/z/zHash.inline.hpp"
  33 #include "gc/z/zLock.inline.hpp"
  34 #include "gc/z/zNMethodTable.hpp"
  35 #include "gc/z/zOopClosures.inline.hpp"
  36 #include "gc/z/zTask.hpp"
  37 #include "gc/z/zWorkers.hpp"
  38 #include "logging/log.hpp"
  39 #include "memory/allocation.inline.hpp"
  40 #include "memory/resourceArea.hpp"
  41 #include "runtime/atomic.hpp"
  42 #include "runtime/orderAccess.hpp"
  43 #include "utilities/debug.hpp"
  44 
  45 class ZNMethodDataOops {
  46 private:
  47   const size_t _nimmediates;
  48   bool         _has_non_immediates;
  49 
  50   static size_t header_size();
  51 
  52   ZNMethodDataOops(const GrowableArray<oop*>& immediates, bool has_non_immediates);
  53 
  54 public:
  55   static ZNMethodDataOops* create(const GrowableArray<oop*>& immediates, bool has_non_immediates);
  56   static void destroy(ZNMethodDataOops* oops);
  57 
  58   size_t immediates_count() const;
  59   oop**  immediates_begin() const;
  60   oop**  immediates_end()   const;
  61 
  62   bool   has_non_immediates() const;
  63 };
  64 
  65 size_t ZNMethodDataOops::header_size() {
  66   const size_t size = sizeof(ZNMethodDataOops);
  67   assert(is_aligned(size, sizeof(oop*)), "Header misaligned");
  68   return size;
  69 }
  70 
  71 ZNMethodDataOops* ZNMethodDataOops::create(const GrowableArray<oop*>& immediates, bool has_non_immediates) {
  72   // Allocate memory for the ZNMethodDataOops object
  73   // plus the immediate oop* array that follows right after.
  74   const size_t size = ZNMethodDataOops::header_size() + (sizeof(oop*) * immediates.length());
  75   void* const data = NEW_C_HEAP_ARRAY(uint8_t, size, mtGC);
  76   return ::new (data) ZNMethodDataOops(immediates, has_non_immediates);
  77 }
  78 
  79 void ZNMethodDataOops::destroy(ZNMethodDataOops* oops) {
  80   ZNMethodTable::safe_delete(oops);
  81 }
  82 
  83 ZNMethodDataOops::ZNMethodDataOops(const GrowableArray<oop*>& immediates, bool has_non_immediates) :
  84     _nimmediates(immediates.length()),
  85     _has_non_immediates(has_non_immediates) {
  86   // Save all immediate oops
  87   for (size_t i = 0; i < _nimmediates; i++) {
  88     immediates_begin()[i] = immediates.at(i);
  89   }
  90 }
  91 
  92 size_t ZNMethodDataOops::immediates_count() const {
  93   return _nimmediates;
  94 }
  95 
  96 oop** ZNMethodDataOops::immediates_begin() const {
  97   // The immediate oop* array starts immediately after this object
  98   return (oop**)((uintptr_t)this + header_size());
  99 }
 100 
 101 oop** ZNMethodDataOops::immediates_end() const {
 102   return immediates_begin() + immediates_count();
 103 }
 104 
 105 bool ZNMethodDataOops::has_non_immediates() const {
 106   return _has_non_immediates;
 107 }
 108 
 109 class ZNMethodData {
 110 private:
 111   ZReentrantLock             _lock;
 112   ZNMethodDataOops* volatile _oops;
 113 
 114   ZNMethodData(nmethod* nm);
 115 
 116 public:
 117   static ZNMethodData* create(nmethod* nm);
 118   static void destroy(ZNMethodData* data);
 119 
 120   ZReentrantLock* lock();
 121 
 122   ZNMethodDataOops* oops() const;
 123   ZNMethodDataOops* swap_oops(ZNMethodDataOops* oops);
 124 };
 125 
 126 ZNMethodData* ZNMethodData::create(nmethod* nm) {
 127   void* const method = NEW_C_HEAP_ARRAY(uint8_t, sizeof(ZNMethodData), mtGC);
 128   return ::new (method) ZNMethodData(nm);
 129 }
 130 
 131 void ZNMethodData::destroy(ZNMethodData* data) {
 132   ZNMethodDataOops::destroy(data->oops());
 133   ZNMethodTable::safe_delete(data);
 134 }
 135 
 136 ZNMethodData::ZNMethodData(nmethod* nm) :
 137     _lock(),
 138     _oops(NULL) {}
 139 
 140 ZReentrantLock* ZNMethodData::lock() {
 141   return &_lock;
 142 }
 143 
 144 ZNMethodDataOops* ZNMethodData::oops() const {
 145   return OrderAccess::load_acquire(&_oops);
 146 }
 147 
 148 ZNMethodDataOops* ZNMethodData::swap_oops(ZNMethodDataOops* new_oops) {
 149   return Atomic::xchg(new_oops, &_oops);
 150 }
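
swap_oops() is the publication point for a freshly built ZNMethodDataOops: the new object is fully initialized before the exchange, so a concurrent reader of _oops sees either the old snapshot or the new one, never a partial one, and the returned previous value is retired through ZNMethodDataOops::destroy()/safe_delete(). A sketch of the same shape with std::atomic:

#include <atomic>

// Build the replacement off to the side, then atomically swap it into the
// shared slot; the caller retires whatever was there before.
template <typename T>
T* publish_and_retire(std::atomic<T*>& slot, T* replacement) {
  return slot.exchange(replacement);  // Returns the previous value
}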
 151 
 152 static ZNMethodData* gc_data(const nmethod* nm) {
 153   return nm->gc_data<ZNMethodData>();
 154 }
 155 
 156 static void set_gc_data(nmethod* nm, ZNMethodData* data) {
 157   return nm->set_gc_data<ZNMethodData>(data);
 158 }
 159 
 160 ZNMethodTableEntry* ZNMethodTable::_table = NULL;
 161 size_t ZNMethodTable::_size = 0;
 162 ZNMethodTableEntry* ZNMethodTable::_iter_table = NULL;
 163 size_t ZNMethodTable::_iter_table_size = 0;
 164 ZArray<void*> ZNMethodTable::_iter_deferred_deletes;
 165 size_t ZNMethodTable::_nregistered = 0;
 166 size_t ZNMethodTable::_nunregistered = 0;
 167 volatile size_t ZNMethodTable::_claimed = 0;
 168 
 169 void ZNMethodTable::safe_delete(void* data) {
 170   assert(CodeCache_lock->owned_by_self(), "Lock must be held");
 171 
 172   if (data == NULL) {
 173     return;
 174   }
 175 
 176   if (_iter_table != NULL) {
 177     // Iteration in progress, defer delete
 178     _iter_deferred_deletes.add(data);
 179   } else {
 180     // Iteration not in progress, delete now
 181     FREE_C_HEAP_ARRAY(uint8_t, data);
 182   }
 183 }
 184 
 185 void ZNMethodTable::attach_gc_data(nmethod* nm) {
 186   GrowableArray<oop*> immediate_oops;
 187   bool non_immediate_oops = false;
 188 
 189   // Find all oops relocations
 190   RelocIterator iter(nm);
 191   while (iter.next()) {
 192     if (iter.type() != relocInfo::oop_type) {
 193       // Not an oop
 194       continue;
 195     }
 196 
 197     oop_Relocation* r = iter.oop_reloc();
 198 
 199     if (!r->oop_is_immediate()) {
 200       // Non-immediate oop found
 201       non_immediate_oops = true;
 202       continue;
 203     }
 204 
 205     if (r->oop_value() != NULL) {
 206       // Non-NULL immediate oop found. NULL oops can safely be
 207       // ignored since the method will be re-registered if they
 208       // are later patched to be non-NULL.
 209       immediate_oops.push(r->oop_addr());
 210     }
 211   }
 212 
 213   // Attach GC data to nmethod
 214   ZNMethodData* data = gc_data(nm);
 215   if (data == NULL) {
 216     data = ZNMethodData::create(nm);
 217     set_gc_data(nm, data);
 218   }
 219 
 220   // Attach oops in GC data
 221   ZNMethodDataOops* const new_oops = ZNMethodDataOops::create(immediate_oops, non_immediate_oops);
 222   ZNMethodDataOops* const old_oops = data->swap_oops(new_oops);
 223   ZNMethodDataOops::destroy(old_oops);
 224 }
 225 
 226 void ZNMethodTable::detach_gc_data(nmethod* nm) {
 227   // Destroy GC data
 228   ZNMethodData::destroy(gc_data(nm));
 229   set_gc_data(nm, NULL);
 230 }
 231 
 232 ZReentrantLock* ZNMethodTable::lock_for_nmethod(nmethod* nm) {
 233   ZNMethodData* const data = gc_data(nm);
 234   if (data == NULL) {
 235     return NULL;
 236   }
 237   return data->lock();
 238 }
 239 
 240 size_t ZNMethodTable::first_index(const nmethod* nm, size_t size) {
 241   assert(is_power_of_2(size), "Invalid size");
 242   const size_t mask = size - 1;
 243   const size_t hash = ZHash::address_to_uint32((uintptr_t)nm);
 244   return hash & mask;
 245 }
 246 
 247 size_t ZNMethodTable::next_index(size_t prev_index, size_t size) {
 248   assert(is_power_of_2(size), "Invalid size");
 249   const size_t mask = size - 1;
 250   return (prev_index + 1) & mask;
 251 }
 252 
 253 bool ZNMethodTable::register_entry(ZNMethodTableEntry* table, size_t size, nmethod* nm) {
 254   const ZNMethodTableEntry entry(nm);
 255   size_t index = first_index(nm, size);
 256 
 257   for (;;) {
 258     const ZNMethodTableEntry table_entry = table[index];
 259 
 260     if (!table_entry.registered() && !table_entry.unregistered()) {
 261       // Insert new entry
 262       table[index] = entry;
 263       return true;
 264     }
 265 
 266     if (table_entry.registered() && table_entry.method() == nm) {
 267       // Replace existing entry
 268       table[index] = entry;
 269       return false;
 270     }
 271 
 272     index = next_index(index, size);
 273   }
 274 }
 275 
 276 void ZNMethodTable::unregister_entry(ZNMethodTableEntry* table, size_t size, nmethod* nm) {
 277   size_t index = first_index(nm, size);
 278 
 279   for (;;) {
 280     const ZNMethodTableEntry table_entry = table[index];
 281     assert(table_entry.registered() || table_entry.unregistered(), "Entry not found");
 282 
 283     if (table_entry.registered() && table_entry.method() == nm) {
 284       // Remove entry
 285       table[index] = ZNMethodTableEntry(true /* unregistered */);
 286       return;
 287     }
 288 
 289     index = next_index(index, size);
 290   }
 291 }
 292 
 293 void ZNMethodTable::rebuild(size_t new_size) {
 294   assert(CodeCache_lock->owned_by_self(), "Lock must be held");
 295 
 296   assert(is_power_of_2(new_size), "Invalid size");
 297 
 298   log_debug(gc, nmethod)("Rebuilding NMethod Table: "
 299                          SIZE_FORMAT "->" SIZE_FORMAT " entries, "
 300                          SIZE_FORMAT "(%.0lf%%->%.0lf%%) registered, "
 301                          SIZE_FORMAT "(%.0lf%%->%.0lf%%) unregistered",
 302                          _size, new_size,
 303                          _nregistered, percent_of(_nregistered, _size), percent_of(_nregistered, new_size),
 304                          _nunregistered, percent_of(_nunregistered, _size), 0.0);
 305 
 306   // Allocate new table
 307   ZNMethodTableEntry* const new_table = new ZNMethodTableEntry[new_size];
 308 
 309   // Transfer all registered entries
 310   for (size_t i = 0; i < _size; i++) {
 311     const ZNMethodTableEntry entry = _table[i];
 312     if (entry.registered()) {
 313       register_entry(new_table, new_size, entry.method());
 314     }
 315   }
 316 
 317   if (_iter_table != _table) {
 318     // Delete old table
 319     delete [] _table;
 320   }
 321 
 322   // Install new table
 323   _table = new_table;
 324   _size = new_size;
 325   _nunregistered = 0;
 326 }
 327 
 328 void ZNMethodTable::rebuild_if_needed() {
 329   // The hash table uses linear probing. To avoid wasting memory while
 330   // at the same time maintaining good hash collision behavior we want
 331   // to keep the table occupancy between 30% and 70%. The table always
 332   // grows/shrinks by doubling/halving its size. Pruning of unregistered
 333   // entries is done by rebuilding the table with or without resizing it.
 334   const size_t min_size = 1024;
 335   const size_t shrink_threshold = _size * 0.30;
 336   const size_t prune_threshold = _size * 0.65;
 337   const size_t grow_threshold = _size * 0.70;
 338 
 339   if (_size == 0) {
 340     // Initialize table
 341     rebuild(min_size);
 342   } else if (_nregistered < shrink_threshold && _size > min_size) {
 343     // Shrink table
 344     rebuild(_size / 2);
 345   } else if (_nregistered + _nunregistered > grow_threshold) {
 346     // Prune or grow table
 347     if (_nregistered < prune_threshold) {
 348       // Prune table
 349       rebuild(_size);
 350     } else {
 351       // Grow table
 352       rebuild(_size * 2);
 353     }
 354   }
 355 }
 356 
 357 void ZNMethodTable::log_register(const nmethod* nm) {
 358   LogTarget(Trace, gc, nmethod) log;
 359   if (!log.is_enabled()) {
 360     return;
 361   }
 362 
 363   const ZNMethodDataOops* const oops = gc_data(nm)->oops();
 364 
 365   log.print("Register NMethod: %s.%s (" PTR_FORMAT "), "
 366             "Compiler: %s, Oops: %d, ImmediateOops: " SIZE_FORMAT ", NonImmediateOops: %s",
 367             nm->method()->method_holder()->external_name(),
 368             nm->method()->name()->as_C_string(),
 369             p2i(nm),
 370             nm->compiler_name(),
 371             nm->oops_count() - 1,
 372             oops->immediates_count(),
 373             oops->has_non_immediates() ? "Yes" : "No");
 374 
 375   LogTarget(Trace, gc, nmethod, oops) log_oops;
 376   if (!log_oops.is_enabled()) {
 377     return;
 378   }
 379 
 380   // Print nmethod oops table
 381   oop* const begin = nm->oops_begin();
 382   oop* const end = nm->oops_end();
 383   for (oop* p = begin; p < end; p++) {
 384     log_oops.print("           Oop[" SIZE_FORMAT "] " PTR_FORMAT " (%s)",
 385                    (p - begin), p2i(*p), (*p)->klass()->external_name());
 386   }
 387 
 388   // Print nmethod immediate oops
 389   if (oops->immediates_count() > 0) {
 390     oop** const begin = oops->immediates_begin();
 391     oop** const end = oops->immediates_end();
 392     for (oop** p = begin; p < end; p++) {
 393       log_oops.print("  ImmediateOop[" SIZE_FORMAT "] " PTR_FORMAT " @ " PTR_FORMAT " (%s)",
 394                      (p - begin), p2i(**p), p2i(*p), (**p)->klass()->external_name());
 395     }
 396   }
 397 }
 398 
 399 void ZNMethodTable::log_unregister(const nmethod* nm) {
 400   LogTarget(Debug, gc, nmethod) log;
 401   if (!log.is_enabled()) {
 402     return;
 403   }
 404 
 405   log.print("Unregister NMethod: %s.%s (" PTR_FORMAT ")",
 406             nm->method()->method_holder()->external_name(),
 407             nm->method()->name()->as_C_string(),
 408             p2i(nm));
 409 }
 410 
 411 size_t ZNMethodTable::registered_nmethods() {
 412   return _nregistered;
 413 }
 414 
 415 size_t ZNMethodTable::unregistered_nmethods() {
 416   return _nunregistered;
 417 }
 418 
 419 void ZNMethodTable::register_nmethod(nmethod* nm) {
 420   assert(CodeCache_lock->owned_by_self(), "Lock must be held");
 421   ResourceMark rm;
 422 
 423   // Grow/Shrink/Prune table if needed
 424   rebuild_if_needed();
 425 
 426   // Create and attach gc data
 427   attach_gc_data(nm);
 428 
 429   log_register(nm);
 430 
 431   // Insert new entry
 432   if (register_entry(_table, _size, nm)) {
 433     // New entry registered. When register_entry() instead returns
 434     // false, the nmethod was already in the table, so we do not want
 435     // to increase the number of registered entries in that case.
 436     _nregistered++;
 437   }
 438 
 439   // Disarm nmethod entry barrier
 440   disarm_nmethod(nm);
 441 }
 442 
 443 void ZNMethodTable::wait_until_iteration_done() {
 444   assert(CodeCache_lock->owned_by_self(), "Lock must be held");
 445 
 446   while (_iter_table != NULL) {
 447     CodeCache_lock->wait(Monitor::_no_safepoint_check_flag);
 448   }
 449 }
 450 
 451 void ZNMethodTable::unregister_nmethod(nmethod* nm) {
 452   assert(CodeCache_lock->owned_by_self(), "Lock must be held");
 453 
 454   if (Thread::current()->is_Code_cache_sweeper_thread()) {
 455     // The sweeper must wait for any ongoing iteration to complete
 456     // before it can unregister an nmethod.
 457     ZNMethodTable::wait_until_iteration_done();
 458   }
 459 
 460   ResourceMark rm;
 461 
 462   log_unregister(nm);
 463 
 464   // Remove entry
 465   unregister_entry(_table, _size, nm);
 466   _nunregistered++;
 467   _nregistered--;
 468 
 469   detach_gc_data(nm);
 470 }
 471 
 472 void ZNMethodTable::disarm_nmethod(nmethod* nm) {
 473   BarrierSetNMethod* const bs = BarrierSet::barrier_set()->barrier_set_nmethod();
 474   if (bs != NULL) {
 475     bs->disarm(nm);
 476   }
 477 }
 478 
 479 void ZNMethodTable::nmethods_do_begin() {
 480   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 481 
 482   // Prepare iteration
 483   _iter_table = _table;
 484   _iter_table_size = _size;
 485   _claimed = 0;
 486   assert(_iter_deferred_deletes.is_empty(), "Should be empty");
 487 }
 488 
 489 void ZNMethodTable::nmethods_do_end() {
 490   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 491 
 492   // Finish iteration
 493   if (_iter_table != _table) {
 494     delete [] _iter_table;
 495   }
 496   _iter_table = NULL;
 497 
 498   assert(_claimed >= _iter_table_size, "Failed to claim all table entries");
 499 
 500   // Process deferred deletes
 501   ZArrayIterator<void*> iter(&_iter_deferred_deletes);
 502   for (void* data; iter.next(&data);) {
 503     FREE_C_HEAP_ARRAY(uint8_t, data);
 504   }
 505   _iter_deferred_deletes.clear();
 506 
 507   // Notify iteration done
 508   CodeCache_lock->notify_all();
 509 }
 510 
 511 void ZNMethodTable::oops_do(nmethod* nm, OopClosure* cl) {
 512   // Process oops table
 513   oop* const begin = nm->oops_begin();
 514   oop* const end = nm->oops_end();
 515   for (oop* p = begin; p < end; p++) {
 516     if (*p != Universe::non_oop_word()) {
 517       cl->do_oop(p);
 518     }
 519   }
 520 
 521   ZNMethodDataOops* const oops = gc_data(nm)->oops();
 522 
 523   // Process immediate oops
 524   if (oops->immediates_count() > 0) {
 525     oop** const begin = oops->immediates_begin();
 526     oop** const end = oops->immediates_end();
 527     for (oop** p = begin; p < end; p++) {
 528       if (**p != Universe::non_oop_word()) {
 529         cl->do_oop(*p);
 530       }
 531     }
 532   }
 533 
 534   // Process non-immediate oops
 535   if (oops->has_non_immediates()) {
 536     nm->fix_oop_relocations();
 537   }
 538 }
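
A hedged usage sketch for oops_do(nmethod*, OopClosure*): callers supply an OopClosure whose do_oop() runs once per live slot in the nmethod's oops table and once per attached immediate oop. The counting closure below is illustrative only, not part of this change (ZGC never deals in narrowOop, hence the ShouldNotReachHere):

class ZCountOopsClosure : public OopClosure {
private:
  size_t _count;

public:
  ZCountOopsClosure() : _count(0) {}

  virtual void do_oop(oop* p)       { _count++; }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }

  size_t count() const { return _count; }
};

// ZCountOopsClosure cl;
// ZNMethodTable::oops_do(nm, &cl);  // Visits the oops table + immediates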
 539 
 540 class ZNMethodToOopsDo : public ZNMethodClosure {
 541 private:
 542   OopClosure* _cl;
 543 
 544 public:
 545   ZNMethodToOopsDo(OopClosure* cl) :
 546       _cl(cl) {}
 547 
 548   void do_nmethod(nmethod* nm) {
 549     ZNMethodTable::oops_do(nm, _cl);
 550   }
 551 };
 552 
 553 void ZNMethodTable::oops_do(OopClosure* cl) {
 554   ZNMethodToOopsDo nm_cl(cl);
 555   nmethods_do(&nm_cl);
 556 }
 557 
 558 void ZNMethodTable::nmethods_do(ZNMethodClosure* cl) {
 559   for (;;) {
 560     // Claim table partition. Each partition is currently sized to span
 561     // two cache lines. This number is just a guess, but seems to work well.
 562     const size_t partition_size = (ZCacheLineSize * 2) / sizeof(ZNMethodTableEntry);
 563     const size_t partition_start = MIN2(Atomic::add(partition_size, &_claimed) - partition_size, _iter_table_size);
 564     const size_t partition_end = MIN2(partition_start + partition_size, _iter_table_size);
 565     if (partition_start == partition_end) {
 566       // End of table
 567       break;
 568     }
 569 
 570     // Process table partition
 571     for (size_t i = partition_start; i < partition_end; i++) {
 572       const ZNMethodTableEntry entry = _iter_table[i];
 573       if (entry.registered()) {
 574         cl->do_nmethod(entry.method());
 575       }
 576     }
 577   }
 578 }
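
The claiming scheme above lets any number of workers share one pass over the table: Atomic::add returns the new cursor value, so subtracting partition_size back yields this worker's start index. A standalone sketch with std::atomic, whose fetch_add returns the old value directly; the 64-entry partition size is illustrative:

#include <algorithm>
#include <atomic>
#include <cstddef>

// 'claimed' must be reset to 0 before the workers start, mirroring
// what nmethods_do_begin() does for _claimed.
template <typename Entry, typename Visit>
void parallel_iterate(std::atomic<size_t>& claimed, const Entry* table,
                      size_t table_size, Visit visit) {
  const size_t partition_size = 64;
  for (;;) {
    const size_t start = std::min(claimed.fetch_add(partition_size), table_size);
    const size_t end = std::min(start + partition_size, table_size);
    if (start == end) {
      break;  // Every partition has been claimed
    }
    for (size_t i = start; i < end; i++) {
      visit(table[i]);  // Caller filters out unregistered entries
    }
  }
}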
 579 
 580 class ZNMethodTableUnlinkClosure : public ZNMethodClosure {
 581 private:
 582   bool          _unloading_occurred;
 583   volatile bool _failed;
 584 
 585   void set_failed() {
 586     Atomic::store(true, &_failed);
 587   }
 588 
 589 public:
 590   ZNMethodTableUnlinkClosure(bool unloading_occurred) :
 591       _unloading_occurred(unloading_occurred),
 592       _failed(false) {}
 593 
 594   virtual void do_nmethod(nmethod* nm) {
 595     if (failed()) {
 596       return;
 597     }
 598 
 599     if (!nm->is_alive()) {
 600       return;
 601     }
 602 
 603     ZLocker<ZReentrantLock> locker(ZNMethodTable::lock_for_nmethod(nm));
 604 
 605     if (nm->is_unloading()) {
 606       // Unlinking of the dependencies must happen before the
 607       // handshake separating unlink and purge.
 608       nm->flush_dependencies(false /* delete_immediately */);
 609 
 610       // We don't need to take the lock when unlinking nmethods from
 611       // the Method, because it is only concurrently unlinked by
 612       // the entry barrier, which acquires the per nmethod lock.
 613       nm->unlink_from_method(false /* acquire_lock */);
 614       return;
 615     }
 616 
 617     // Heal oops and disarm
 618     ZNMethodOopClosure cl;
 619     ZNMethodTable::oops_do(nm, &cl);
 620     ZNMethodTable::disarm_nmethod(nm);
 621 
 622     // Clear compiled ICs and exception caches
 623     if (!nm->unload_nmethod_caches(_unloading_occurred)) {
 624       set_failed();
 625     }
 626   }
 627 
 628   bool failed() const {
 629     return Atomic::load(&_failed);
 630   }
 631 };
 632 
 633 class ZNMethodTableUnlinkTask : public ZTask {
 634 private:
 635   ZNMethodTableUnlinkClosure _cl;
 636   ICRefillVerifier*          _verifier;
 637 
 638 public:
 639   ZNMethodTableUnlinkTask(bool unloading_occurred, ICRefillVerifier* verifier) :
 640       ZTask("ZNMethodTableUnlinkTask"),
 641       _cl(unloading_occurred),
 642       _verifier(verifier) {
 643     ZNMethodTable::nmethods_do_begin();
 644   }
 645 
 646   ~ZNMethodTableUnlinkTask() {
 647     ZNMethodTable::nmethods_do_end();
 648   }
 649 
 650   virtual void work() {
 651     ICRefillVerifierMark mark(_verifier);
 652     ZNMethodTable::nmethods_do(&_cl);
 653   }
 654 
 655   bool success() const {
 656     return !_cl.failed();
 657   }
 658 };
 659 
 660 void ZNMethodTable::unlink(ZWorkers* workers, bool unloading_occurred) {
 661   for (;;) {
 662     ICRefillVerifier verifier;
 663 
 664     {
 665       ZNMethodTableUnlinkTask task(unloading_occurred, &verifier);
 666       workers->run_concurrent(&task);
 667       if (task.success()) {
 668         return;
 669       }
 670     }
 671 
 672     // Cleaning failed because we ran out of transitional IC stubs,
 673     // so we have to refill and try again. Refilling requires taking
 674     // a safepoint, so we temporarily leave the suspendible thread set.
 675     SuspendibleThreadSetLeaver sts;
 676     InlineCacheBuffer::refill_ic_stubs();
 677   }
 678 }
 679 
 680 class ZNMethodTablePurgeClosure : public ZNMethodClosure {
 681 public:
 682   virtual void do_nmethod(nmethod* nm) {
 683     if (nm->is_alive() && nm->is_unloading()) {
 684       nm->make_unloaded();
 685     }
 686   }
 687 };
 688 
 689 class ZNMethodTablePurgeTask : public ZTask {
 690 private:
 691   ZNMethodTablePurgeClosure _cl;
 692 
 693 public:
 694   ZNMethodTablePurgeTask() :
 695       ZTask("ZNMethodTablePurgeTask"),
 696       _cl() {
 697     ZNMethodTable::nmethods_do_begin();
 698   }
 699 
 700   ~ZNMethodTablePurgeTask() {
 701     ZNMethodTable::nmethods_do_end();
 702   }
 703 
 704   virtual void work() {
 705     ZNMethodTable::nmethods_do(&_cl);
 706   }
 707 };
 708 
 709 void ZNMethodTable::purge(ZWorkers* workers) {
 710   ZNMethodTablePurgeTask task;
 711   workers->run_concurrent(&task);
 712 }