/*
 * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/relocInfo.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHash.inline.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zNMethodAllocator.hpp"
#include "gc/z/zNMethodData.hpp"
#include "gc/z/zNMethodTable.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zWorkers.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "utilities/debug.hpp"

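// The table is an open-addressing hash table keyed by nmethod address.
// _table/_size hold the current table, while _iter_table/_iter_table_size
// hold the snapshot that GC workers iterate over (NULL when no iteration
// is in progress). If the table is rebuilt during an ongoing iteration,
// the snapshot is left in place and deleted by nmethods_do_end().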
ZNMethodTableEntry* ZNMethodTable::_table = NULL;
size_t ZNMethodTable::_size = 0;
ZNMethodTableEntry* ZNMethodTable::_iter_table = NULL;
size_t ZNMethodTable::_iter_table_size = 0;
size_t ZNMethodTable::_nregistered = 0;
size_t ZNMethodTable::_nunregistered = 0;
volatile size_t ZNMethodTable::_claimed = 0;

static ZNMethodData* gc_data(const nmethod* nm) {
  return nm->gc_data<ZNMethodData>();
}

static void set_gc_data(nmethod* nm, ZNMethodData* data) {
  nm->set_gc_data<ZNMethodData>(data);
}
void ZNMethodTable::attach_gc_data(nmethod* nm) {
  GrowableArray<oop*> immediate_oops;
  bool non_immediate_oops = false;

  // Find all oop relocations
  RelocIterator iter(nm);
  while (iter.next()) {
    if (iter.type() != relocInfo::oop_type) {
      // Not an oop
      continue;
    }

    oop_Relocation* r = iter.oop_reloc();

    if (!r->oop_is_immediate()) {
      // Non-immediate oop found
      non_immediate_oops = true;
      continue;
    }

    if (r->oop_value() != NULL) {
      // Non-NULL immediate oop found. NULL oops can safely be
      // ignored since the method will be re-registered if they
      // are later patched to be non-NULL.
      immediate_oops.push(r->oop_addr());
    }
  }

  // Attach GC data to nmethod
  ZNMethodData* data = gc_data(nm);
  if (data == NULL) {
    data = ZNMethodData::create(nm);
    set_gc_data(nm, data);
  }

  // Attach oops in GC data
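  // The new oops are swapped in atomically; the old oops structure is
  // freed through ZNMethodAllocator, which defers the free while a
  // table iteration may still be reading it (see nmethods_do_begin/
  // nmethods_do_end).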
  ZNMethodDataOops* const new_oops = ZNMethodDataOops::create(immediate_oops, non_immediate_oops);
  ZNMethodDataOops* const old_oops = data->swap_oops(new_oops);
  ZNMethodDataOops::destroy(old_oops);
}

void ZNMethodTable::detach_gc_data(nmethod* nm) {
  // Destroy GC data
  ZNMethodData::destroy(gc_data(nm));
  set_gc_data(nm, NULL);
}

ZReentrantLock* ZNMethodTable::lock_for_nmethod(nmethod* nm) {
  ZNMethodData* const data = gc_data(nm);
  if (data == NULL) {
    return NULL;
  }
  return data->lock();
}

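// The table uses linear probing: first_index() computes an nmethod's
// home slot from its address hash, and next_index() advances one slot
// at a time, wrapping around. A power-of-two size allows the index to
// be computed with a mask instead of a modulo.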
size_t ZNMethodTable::first_index(const nmethod* nm, size_t size) {
  assert(is_power_of_2(size), "Invalid size");
  const size_t mask = size - 1;
  const size_t hash = ZHash::address_to_uint32((uintptr_t)nm);
  return hash & mask;
}

size_t ZNMethodTable::next_index(size_t prev_index, size_t size) {
  assert(is_power_of_2(size), "Invalid size");
  const size_t mask = size - 1;
  return (prev_index + 1) & mask;
}

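// Returns true if a new entry was inserted, and false if an existing
// entry for the same nmethod was replaced. Slots holding unregistered
// (tombstone) entries are probed past rather than reused; they are only
// reclaimed when the table is rebuilt.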
bool ZNMethodTable::register_entry(ZNMethodTableEntry* table, size_t size, nmethod* nm) {
  const ZNMethodTableEntry entry(nm);
  size_t index = first_index(nm, size);

  for (;;) {
    const ZNMethodTableEntry table_entry = table[index];

    if (!table_entry.registered() && !table_entry.unregistered()) {
      // Insert new entry
      table[index] = entry;
      return true;
    }

    if (table_entry.registered() && table_entry.method() == nm) {
      // Replace existing entry
      table[index] = entry;
      return false;
    }

    index = next_index(index, size);
  }
}

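// Unregistering writes a tombstone instead of clearing the slot, so that
// probe chains passing through the slot remain unbroken.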
void ZNMethodTable::unregister_entry(ZNMethodTableEntry* table, size_t size, nmethod* nm) {
  size_t index = first_index(nm, size);

  for (;;) {
    const ZNMethodTableEntry table_entry = table[index];
    assert(table_entry.registered() || table_entry.unregistered(), "Entry not found");

    if (table_entry.registered() && table_entry.method() == nm) {
      // Remove entry
      table[index] = ZNMethodTableEntry(true /* unregistered */);
      return;
    }

    index = next_index(index, size);
  }
}

void ZNMethodTable::rebuild(size_t new_size) {
  assert(CodeCache_lock->owned_by_self(), "Lock must be held");

  assert(is_power_of_2(new_size), "Invalid size");

  log_debug(gc, nmethod)("Rebuilding NMethod Table: "
                         SIZE_FORMAT "->" SIZE_FORMAT " entries, "
                         SIZE_FORMAT "(%.0lf%%->%.0lf%%) registered, "
                         SIZE_FORMAT "(%.0lf%%->%.0lf%%) unregistered",
                         _size, new_size,
                         _nregistered, percent_of(_nregistered, _size), percent_of(_nregistered, new_size),
                         _nunregistered, percent_of(_nunregistered, _size), 0.0);

  // Allocate new table
  ZNMethodTableEntry* const new_table = new ZNMethodTableEntry[new_size];

  // Transfer all registered entries
  for (size_t i = 0; i < _size; i++) {
    const ZNMethodTableEntry entry = _table[i];
    if (entry.registered()) {
      register_entry(new_table, new_size, entry.method());
    }
  }

  if (_iter_table != _table) {
    // Delete old table
    delete [] _table;
  }

  // Install new table
  _table = new_table;
  _size = new_size;
  _nunregistered = 0;
}

void ZNMethodTable::rebuild_if_needed() {
  // The hash table uses linear probing. To avoid wasting memory while
  // at the same time maintaining good hash collision behavior we want
  // to keep the table occupancy between 30% and 70%. The table always
  // grows/shrinks by doubling/halving its size. Pruning of unregistered
  // entries is done by rebuilding the table with or without resizing it.
  const size_t min_size = 1024;
  const size_t shrink_threshold = _size * 0.30;
  const size_t prune_threshold = _size * 0.65;
  const size_t grow_threshold = _size * 0.70;
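  // For example, with _size == 1024 the table shrinks when fewer than
  // 307 nmethods are registered, and is pruned or grown when registered
  // plus unregistered entries exceed 716 (grown if at least 665 of them
  // are registered, pruned otherwise).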

  if (_size == 0) {
    // Initialize table
    rebuild(min_size);
  } else if (_nregistered < shrink_threshold && _size > min_size) {
    // Shrink table
    rebuild(_size / 2);
  } else if (_nregistered + _nunregistered > grow_threshold) {
    // Prune or grow table
    if (_nregistered < prune_threshold) {
      // Prune table
      rebuild(_size);
    } else {
      // Grow table
      rebuild(_size * 2);
    }
  }
}

void ZNMethodTable::log_register(const nmethod* nm) {
  LogTarget(Trace, gc, nmethod) log;
  if (!log.is_enabled()) {
    return;
  }

  const ZNMethodDataOops* const oops = gc_data(nm)->oops();

  log.print("Register NMethod: %s.%s (" PTR_FORMAT "), "
            "Compiler: %s, Oops: %d, ImmediateOops: " SIZE_FORMAT ", NonImmediateOops: %s",
            nm->method()->method_holder()->external_name(),
            nm->method()->name()->as_C_string(),
            p2i(nm),
            nm->compiler_name(),
            nm->oops_count() - 1,
            oops->immediates_count(),
            oops->has_non_immediates() ? "Yes" : "No");

  LogTarget(Trace, gc, nmethod, oops) log_oops;
  if (!log_oops.is_enabled()) {
    return;
  }

  // Print nmethod oops table
  oop* const begin = nm->oops_begin();
  oop* const end = nm->oops_end();
  for (oop* p = begin; p < end; p++) {
    log_oops.print("           Oop[" SIZE_FORMAT "] " PTR_FORMAT " (%s)",
                   (p - begin), p2i(*p), (*p)->klass()->external_name());
  }

  // Print nmethod immediate oops
  if (oops->immediates_count() > 0) {
    oop** const begin = oops->immediates_begin();
    oop** const end = oops->immediates_end();
    for (oop** p = begin; p < end; p++) {
      log_oops.print("  ImmediateOop[" SIZE_FORMAT "] " PTR_FORMAT " @ " PTR_FORMAT " (%s)",
                     (p - begin), p2i(**p), p2i(*p), (**p)->klass()->external_name());
    }
  }
}

void ZNMethodTable::log_unregister(const nmethod* nm) {
  LogTarget(Debug, gc, nmethod) log;
  if (!log.is_enabled()) {
    return;
  }

  log.print("Unregister NMethod: %s.%s (" PTR_FORMAT ")",
            nm->method()->method_holder()->external_name(),
            nm->method()->name()->as_C_string(),
            p2i(nm));
}

size_t ZNMethodTable::registered_nmethods() {
  return _nregistered;
}

size_t ZNMethodTable::unregistered_nmethods() {
  return _nunregistered;
}

void ZNMethodTable::register_nmethod(nmethod* nm) {
  assert(CodeCache_lock->owned_by_self(), "Lock must be held");
  ResourceMark rm;

  // Grow/Shrink/Prune table if needed
  rebuild_if_needed();

  // Create and attach gc data
  attach_gc_data(nm);

  log_register(nm);

  // Insert new entry
  if (register_entry(_table, _size, nm)) {
    // New entry registered. When register_entry() instead returns
    // false the nmethod was already in the table so we do not want
    // to increase number of registered entries in that case.
    _nregistered++;
  }

  // Disarm nmethod entry barrier
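  // so that threads can enter the method without taking the entry
  // barrier slow path.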
  disarm_nmethod(nm);
}

void ZNMethodTable::wait_until_iteration_done() {
  assert(CodeCache_lock->owned_by_self(), "Lock must be held");

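  // _iter_table is set back to NULL by nmethods_do_end(), which also
  // notifies on CodeCache_lock.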
  while (_iter_table != NULL) {
    CodeCache_lock->wait(Monitor::_no_safepoint_check_flag);
  }
}

void ZNMethodTable::unregister_nmethod(nmethod* nm) {
  assert(CodeCache_lock->owned_by_self(), "Lock must be held");

  if (Thread::current()->is_Code_cache_sweeper_thread()) {
    // The sweeper must wait for any ongoing iteration to complete
    // before it can unregister an nmethod.
    ZNMethodTable::wait_until_iteration_done();
  }

  ResourceMark rm;

  log_unregister(nm);

  // Remove entry
  unregister_entry(_table, _size, nm);
  _nunregistered++;
  _nregistered--;

  detach_gc_data(nm);
}

void ZNMethodTable::disarm_nmethod(nmethod* nm) {
  BarrierSetNMethod* const bs = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (bs != NULL) {
    bs->disarm(nm);
  }
}

void ZNMethodTable::nmethods_do_begin() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  // Make sure we don't free data while iterating
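  // by deferring frees made through ZNMethodAllocator; the queued
  // frees are processed in nmethods_do_end().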
  ZNMethodAllocator::activate_deferred_frees();

  // Prepare iteration
  _iter_table = _table;
  _iter_table_size = _size;
  _claimed = 0;
}

void ZNMethodTable::nmethods_do_end() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  // Finish iteration
  if (_iter_table != _table) {
    delete [] _iter_table;
  }
  _iter_table = NULL;

  assert(_claimed >= _iter_table_size, "Failed to claim all table entries");

  // Process deferred frees
  ZNMethodAllocator::deactivate_and_process_deferred_frees();

  // Notify iteration done
  CodeCache_lock->notify_all();
}

void ZNMethodTable::oops_do(nmethod* nm, OopClosure* cl) {
  // Process oops table
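  // Entries equal to Universe::non_oop_word() are placeholders that do
  // not contain a real oop and must be skipped.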
  oop* const begin = nm->oops_begin();
  oop* const end = nm->oops_end();
  for (oop* p = begin; p < end; p++) {
    if (*p != Universe::non_oop_word()) {
      cl->do_oop(p);
    }
  }

  ZNMethodDataOops* const oops = gc_data(nm)->oops();

  // Process immediate oops
  if (oops->immediates_count() > 0) {
    oop** const begin = oops->immediates_begin();
    oop** const end = oops->immediates_end();
    for (oop** p = begin; p < end; p++) {
      if (**p != Universe::non_oop_word()) {
        cl->do_oop(*p);
      }
    }
  }

  // Process non-immediate oops
  if (oops->has_non_immediates()) {
    nm->fix_oop_relocations();
  }
}

class ZNMethodToOopsDo : public ZNMethodClosure {
private:
  OopClosure* _cl;

public:
  ZNMethodToOopsDo(OopClosure* cl) :
      _cl(cl) {}

  virtual void do_nmethod(nmethod* nm) {
    ZNMethodTable::oops_do(nm, _cl);
  }
};

void ZNMethodTable::oops_do(OopClosure* cl) {
  ZNMethodToOopsDo nm_cl(cl);
  nmethods_do(&nm_cl);
}

void ZNMethodTable::nmethods_do(ZNMethodClosure* cl) {
  for (;;) {
    // Claim table partition. Each partition is currently sized to span
    // two cache lines. This number is just a guess, but seems to work well.
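    // With 64-byte cache lines and 8-byte table entries, for example,
    // each claim covers 16 entries.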
    const size_t partition_size = (ZCacheLineSize * 2) / sizeof(ZNMethodTableEntry);
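    // Atomic::add() returns the post-add value, so subtracting
    // partition_size gives the start of the range just claimed by this
    // thread. Both bounds are clamped to the table size, so claims past
    // the end of the table come out empty.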
    const size_t partition_start = MIN2(Atomic::add(partition_size, &_claimed) - partition_size, _iter_table_size);
    const size_t partition_end = MIN2(partition_start + partition_size, _iter_table_size);
    if (partition_start == partition_end) {
      // End of table
      break;
    }

    // Process table partition
    for (size_t i = partition_start; i < partition_end; i++) {
      const ZNMethodTableEntry entry = _iter_table[i];
      if (entry.registered()) {
        cl->do_nmethod(entry.method());
      }
    }
  }
}

class ZNMethodTableUnlinkClosure : public ZNMethodClosure {
private:
  bool          _unloading_occurred;
  volatile bool _failed;

  void set_failed() {
    Atomic::store(true, &_failed);
  }

public:
  ZNMethodTableUnlinkClosure(bool unloading_occurred) :
      _unloading_occurred(unloading_occurred),
      _failed(false) {}

  virtual void do_nmethod(nmethod* nm) {
    if (failed()) {
      return;
    }

    if (!nm->is_alive()) {
      return;
    }

    ZLocker<ZReentrantLock> locker(ZNMethodTable::lock_for_nmethod(nm));

    if (nm->is_unloading()) {
      // Unlinking of the dependencies must happen before the
      // handshake separating unlink and purge.
      nm->flush_dependencies(false /* delete_immediately */);

      // We don't need to take the lock when unlinking nmethods from
      // the Method, because it is only concurrently unlinked by
      // the entry barrier, which acquires the per-nmethod lock.
      nm->unlink_from_method(false /* acquire_lock */);
      return;
    }

    // Heal oops and disarm
    ZNMethodOopClosure cl;
    ZNMethodTable::oops_do(nm, &cl);
    ZNMethodTable::disarm_nmethod(nm);

    // Clear compiled ICs and exception caches
    if (!nm->unload_nmethod_caches(_unloading_occurred)) {
      set_failed();
    }
  }

  bool failed() const {
    return Atomic::load(&_failed);
  }
};

class ZNMethodTableUnlinkTask : public ZTask {
private:
  ZNMethodTableUnlinkClosure _cl;
  ICRefillVerifier*          _verifier;

public:
  ZNMethodTableUnlinkTask(bool unloading_occurred, ICRefillVerifier* verifier) :
      ZTask("ZNMethodTableUnlinkTask"),
      _cl(unloading_occurred),
      _verifier(verifier) {
    ZNMethodTable::nmethods_do_begin();
  }

  ~ZNMethodTableUnlinkTask() {
    ZNMethodTable::nmethods_do_end();
  }

  virtual void work() {
    ICRefillVerifierMark mark(_verifier);
    ZNMethodTable::nmethods_do(&_cl);
  }

  bool success() const {
    return !_cl.failed();
  }
};

void ZNMethodTable::unlink(ZWorkers* workers, bool unloading_occurred) {
  for (;;) {
    ICRefillVerifier verifier;

    {
      ZNMethodTableUnlinkTask task(unloading_occurred, &verifier);
      workers->run_concurrent(&task);
      if (task.success()) {
        return;
      }
    }

    // Cleaning failed because we ran out of transitional IC stubs,
    // so we have to refill and try again. Refilling requires taking
    // a safepoint, so we temporarily leave the suspendible thread set.
    SuspendibleThreadSetLeaver sts;
    InlineCacheBuffer::refill_ic_stubs();
  }
}

class ZNMethodTablePurgeClosure : public ZNMethodClosure {
public:
  virtual void do_nmethod(nmethod* nm) {
    if (nm->is_alive() && nm->is_unloading()) {
      nm->make_unloaded();
    }
  }
};

class ZNMethodTablePurgeTask : public ZTask {
private:
  ZNMethodTablePurgeClosure _cl;

public:
  ZNMethodTablePurgeTask() :
      ZTask("ZNMethodTablePurgeTask"),
      _cl() {
    ZNMethodTable::nmethods_do_begin();
  }

  ~ZNMethodTablePurgeTask() {
    ZNMethodTable::nmethods_do_end();
  }

  virtual void work() {
    ZNMethodTable::nmethods_do(&_cl);
  }
};

void ZNMethodTable::purge(ZWorkers* workers) {
  ZNMethodTablePurgeTask task;
  workers->run_concurrent(&task);
}