rev 53848 : 8219469: ZGC: Extract functions out from ZNMethodTable into new ZNMethod class

   1 /*
   2  * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
#include "precompiled.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/relocInfo.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHash.inline.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zNMethodAllocator.hpp"
#include "gc/z/zNMethodClosure.hpp"
#include "gc/z/zNMethodData.hpp"
#include "gc/z/zNMethodTable.hpp"
#include "gc/z/zNMethodTableEntry.hpp"
#include "gc/z/zNMethodTableIteration.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zWorkers.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.hpp"
#include "utilities/debug.hpp"
#include "utilities/growableArray.hpp"
  47 
// Hash table storage (open addressing with linear probing). The table is
// only mutated with the CodeCache_lock held (asserted in register_nmethod()
// and rebuild()); concurrent GC workers iterate over a snapshot handed to
// _iteration in nmethods_do_begin().
ZNMethodTableEntry* ZNMethodTable::_table = NULL;
size_t ZNMethodTable::_size = 0;          // Table capacity (always a power of two)
size_t ZNMethodTable::_nregistered = 0;   // Number of registered entries
size_t ZNMethodTable::_nunregistered = 0; // Number of unregistered (tombstone) entries
ZNMethodTableIteration ZNMethodTable::_iteration;
  53 
// Returns the ZNMethodData attached to the nmethod, or NULL if no
// data has been attached yet.
static ZNMethodData* gc_data(const nmethod* nm) {
  return nm->gc_data<ZNMethodData>();
}
  57 
  58 static void set_gc_data(nmethod* nm, ZNMethodData* data) {
  59   return nm->set_gc_data<ZNMethodData>(data);
  60 }
  61 
// Allocates a table of 'size' default-constructed entries. Memory comes from
// ZNMethodAllocator, whose frees can be deferred while a concurrent iteration
// over the table is in progress (see nmethods_do_begin()/end()).
ZNMethodTableEntry* ZNMethodTable::create(size_t size) {
  void* const mem = ZNMethodAllocator::allocate(size * sizeof(ZNMethodTableEntry));
  // Placement array-new to default-construct the entries in the raw memory
  return ::new (mem) ZNMethodTableEntry[size];
}
  66 
// Frees a table previously allocated by create(). The free may be deferred
// by ZNMethodAllocator if an iteration is in progress.
void ZNMethodTable::destroy(ZNMethodTableEntry* table) {
  ZNMethodAllocator::free(table);
}
  70 
// Scans the nmethod's relocations and (re)attaches GC data to it. The
// addresses of non-NULL immediate oops are recorded so the GC can visit
// them without re-walking the relocations; the presence of non-immediate
// oops is recorded as a flag. Called with the CodeCache_lock held (see
// register_nmethod()).
void ZNMethodTable::attach_gc_data(nmethod* nm) {
  GrowableArray<oop*> immediate_oops;
  bool non_immediate_oops = false;

  // Find all oops relocations
  RelocIterator iter(nm);
  while (iter.next()) {
    if (iter.type() != relocInfo::oop_type) {
      // Not an oop
      continue;
    }

    oop_Relocation* r = iter.oop_reloc();

    if (!r->oop_is_immediate()) {
      // Non-immediate oop found
      non_immediate_oops = true;
      continue;
    }

    if (r->oop_value() != NULL) {
      // Non-NULL immediate oop found. NULL oops can safely be
      // ignored since the method will be re-registered if they
      // are later patched to be non-NULL.
      immediate_oops.push(r->oop_addr());
    }
  }

  // Attach GC data to nmethod, created lazily on first registration
  ZNMethodData* data = gc_data(nm);
  if (data == NULL) {
    data = ZNMethodData::create(nm);
    set_gc_data(nm, data);
  }

  // Attach oops in GC data, destroying the previously attached set (if any)
  ZNMethodDataOops* const new_oops = ZNMethodDataOops::create(immediate_oops, non_immediate_oops);
  ZNMethodDataOops* const old_oops = data->swap_oops(new_oops);
  ZNMethodDataOops::destroy(old_oops);
}
 111 
// Destroys and detaches the nmethod's GC data. Called when the nmethod
// is unregistered (see unregister_nmethod()).
void ZNMethodTable::detach_gc_data(nmethod* nm) {
  // Destroy GC data
  ZNMethodData::destroy(gc_data(nm));
  set_gc_data(nm, NULL);
}
 117 
 118 ZReentrantLock* ZNMethodTable::lock_for_nmethod(nmethod* nm) {
 119   ZNMethodData* const data = gc_data(nm);
 120   if (data == NULL) {
 121     return NULL;
 122   }
 123   return data->lock();
 124 }
 125 
 126 size_t ZNMethodTable::first_index(const nmethod* nm, size_t size) {
 127   assert(is_power_of_2(size), "Invalid size");
 128   const size_t mask = size - 1;
 129   const size_t hash = ZHash::address_to_uint32((uintptr_t)nm);
 130   return hash & mask;
 131 }
 132 
 133 size_t ZNMethodTable::next_index(size_t prev_index, size_t size) {
 134   assert(is_power_of_2(size), "Invalid size");
 135   const size_t mask = size - 1;
 136   return (prev_index + 1) & mask;
 137 }
 138 
// Inserts nm into the given table using linear probing. Returns true if a
// new entry was inserted, false if an existing entry for nm was replaced.
// Probing skips over both registered entries for other nmethods and
// unregistered tombstones, so an insert only ever lands in a never-used
// slot; tombstones are reclaimed by rebuild(), not here.
bool ZNMethodTable::register_entry(ZNMethodTableEntry* table, size_t size, nmethod* nm) {
  const ZNMethodTableEntry entry(nm);
  size_t index = first_index(nm, size);

  for (;;) {
    const ZNMethodTableEntry table_entry = table[index];

    if (!table_entry.registered() && !table_entry.unregistered()) {
      // Insert new entry
      table[index] = entry;
      return true;
    }

    if (table_entry.registered() && table_entry.method() == nm) {
      // Replace existing entry
      table[index] = entry;
      return false;
    }

    index = next_index(index, size);
  }
}
 161 
// Replaces nm's entry with an unregistered tombstone, keeping the probe
// chain through this slot intact for other entries. The entry must exist;
// hitting a never-used slot during the probe asserts.
void ZNMethodTable::unregister_entry(ZNMethodTableEntry* table, size_t size, nmethod* nm) {
  size_t index = first_index(nm, size);

  for (;;) {
    const ZNMethodTableEntry table_entry = table[index];
    assert(table_entry.registered() || table_entry.unregistered(), "Entry not found");

    if (table_entry.registered() && table_entry.method() == nm) {
      // Remove entry by overwriting it with a tombstone
      table[index] = ZNMethodTableEntry(true /* unregistered */);
      return;
    }

    index = next_index(index, size);
  }
}
 178 
// Rebuilds the table with new_size slots, re-inserting all registered
// entries and dropping all unregistered (tombstone) entries. Used to
// initialize, grow, shrink and prune the table.
void ZNMethodTable::rebuild(size_t new_size) {
  assert(CodeCache_lock->owned_by_self(), "Lock must be held");

  assert(is_power_of_2(new_size), "Invalid size");

  log_debug(gc, nmethod)("Rebuilding NMethod Table: "
                         SIZE_FORMAT "->" SIZE_FORMAT " entries, "
                         SIZE_FORMAT "(%.0lf%%->%.0lf%%) registered, "
                         SIZE_FORMAT "(%.0lf%%->%.0lf%%) unregistered",
                         _size, new_size,
                         _nregistered, percent_of(_nregistered, _size), percent_of(_nregistered, new_size),
                         _nunregistered, percent_of(_nunregistered, _size), 0.0);

  // Allocate new table
  ZNMethodTableEntry* const new_table = ZNMethodTable::create(new_size);

  // Transfer all registered entries; tombstones are not transferred
  for (size_t i = 0; i < _size; i++) {
    const ZNMethodTableEntry entry = _table[i];
    if (entry.registered()) {
      register_entry(new_table, new_size, entry.method());
    }
  }

  // Free old table (may be deferred if an iteration is in progress)
  ZNMethodTable::destroy(_table);

  // Install new table
  _table = new_table;
  _size = new_size;
  _nunregistered = 0;
}
 211 
// Initializes, grows, shrinks or prunes the table ahead of a registration,
// based on the current occupancy.
void ZNMethodTable::rebuild_if_needed() {
  // The hash table uses linear probing. To avoid wasting memory while
  // at the same time maintaining good hash collision behavior we want
  // to keep the table occupancy between 30% and 70%. The table always
  // grows/shrinks by doubling/halving its size. Pruning of unregistered
  // entries is done by rebuilding the table with or without resizing it.
  // (The double->size_t truncation in the thresholds is intended.)
  const size_t min_size = 1024;
  const size_t shrink_threshold = _size * 0.30;
  const size_t prune_threshold = _size * 0.65;
  const size_t grow_threshold = _size * 0.70;

  if (_size == 0) {
    // Initialize table
    rebuild(min_size);
  } else if (_nregistered < shrink_threshold && _size > min_size) {
    // Shrink table
    rebuild(_size / 2);
  } else if (_nregistered + _nunregistered > grow_threshold) {
    // Prune or grow table
    if (_nregistered < prune_threshold) {
      // Prune table (rebuild at the same size drops the tombstones)
      rebuild(_size);
    } else {
      // Grow table
      rebuild(_size * 2);
    }
  }
}
 240 
// Logs registration of the nmethod. At (gc, nmethod) trace level the basic
// facts are printed; at (gc, nmethod, oops) trace level the full oops table
// and the recorded immediate oops are dumped as well.
void ZNMethodTable::log_register(const nmethod* nm) {
  LogTarget(Trace, gc, nmethod) log;
  if (!log.is_enabled()) {
    return;
  }

  const ZNMethodDataOops* const oops = gc_data(nm)->oops();

  log.print("Register NMethod: %s.%s (" PTR_FORMAT "), "
            "Compiler: %s, Oops: %d, ImmediateOops: " SIZE_FORMAT ", NonImmediateOops: %s",
            nm->method()->method_holder()->external_name(),
            nm->method()->name()->as_C_string(),
            p2i(nm),
            nm->compiler_name(),
            nm->oops_count() - 1,
            oops->immediates_count(),
            oops->has_non_immediates() ? "Yes" : "No");

  LogTarget(Trace, gc, nmethod, oops) log_oops;
  if (!log_oops.is_enabled()) {
    return;
  }

  // Print nmethod oops table
  oop* const begin = nm->oops_begin();
  oop* const end = nm->oops_end();
  for (oop* p = begin; p < end; p++) {
    log_oops.print("           Oop[" SIZE_FORMAT "] " PTR_FORMAT " (%s)",
                   (p - begin), p2i(*p), (*p)->klass()->external_name());
  }

  // Print nmethod immediate oops
  if (oops->immediates_count() > 0) {
    oop** const begin = oops->immediates_begin();
    oop** const end = oops->immediates_end();
    for (oop** p = begin; p < end; p++) {
      log_oops.print("  ImmediateOop[" SIZE_FORMAT "] " PTR_FORMAT " @ " PTR_FORMAT " (%s)",
                     (p - begin), p2i(**p), p2i(*p), (**p)->klass()->external_name());
    }
  }
}
 282 
// Logs unregistration of the nmethod at (gc, nmethod) debug level.
void ZNMethodTable::log_unregister(const nmethod* nm) {
  LogTarget(Debug, gc, nmethod) log;
  if (!log.is_enabled()) {
    return;
  }

  log.print("Unregister NMethod: %s.%s (" PTR_FORMAT ")",
            nm->method()->method_holder()->external_name(),
            nm->method()->name()->as_C_string(),
            p2i(nm));
}
 294 
// Number of currently registered nmethods
size_t ZNMethodTable::registered_nmethods() {
  return _nregistered;
}
 298 
// Number of unregistered (tombstone) entries still in the table
size_t ZNMethodTable::unregistered_nmethods() {
  return _nunregistered;
}
 302 
// Registers (or re-registers, after oop patching) an nmethod with the GC.
// Caller must hold the CodeCache_lock.
void ZNMethodTable::register_nmethod(nmethod* nm) {
  assert(CodeCache_lock->owned_by_self(), "Lock must be held");
  ResourceMark rm;

  // Grow/Shrink/Prune table if needed
  rebuild_if_needed();

  // Create and attach gc data
  attach_gc_data(nm);

  log_register(nm);

  // Insert new entry
  if (register_entry(_table, _size, nm)) {
    // New entry registered. When register_entry() instead returns
    // false the nmethod was already in the table so we do not want
    // to increase number of registered entries in that case.
    _nregistered++;
  }

  // Disarm nmethod entry barrier
  disarm_nmethod(nm);
}
 326 
// Blocks until any in-progress concurrent iteration has completed. The
// CodeCache_lock doubles as the monitor; nmethods_do_end() notifies it.
void ZNMethodTable::wait_until_iteration_done() {
  assert(CodeCache_lock->owned_by_self(), "Lock must be held");

  while (_iteration.in_progress()) {
    CodeCache_lock->wait(Monitor::_no_safepoint_check_flag);
  }
}
 334 
// Unregisters an nmethod from the GC: replaces its table entry with a
// tombstone and destroys its GC data. Caller must hold the CodeCache_lock.
void ZNMethodTable::unregister_nmethod(nmethod* nm) {
  assert(CodeCache_lock->owned_by_self(), "Lock must be held");

  if (Thread::current()->is_Code_cache_sweeper_thread()) {
    // The sweeper must wait for any ongoing iteration to complete
    // before it can unregister an nmethod.
    ZNMethodTable::wait_until_iteration_done();
  }

  ResourceMark rm;

  log_unregister(nm);

  // Remove entry
  unregister_entry(_table, _size, nm);
  _nunregistered++;
  _nregistered--;

  detach_gc_data(nm);
}
 355 
 356 void ZNMethodTable::disarm_nmethod(nmethod* nm) {
 357   BarrierSetNMethod* const bs = BarrierSet::barrier_set()->barrier_set_nmethod();
 358   if (bs != NULL) {
 359     bs->disarm(nm);
 360   }
 361 }
 362 
// Prepares a concurrent iteration over the current table. Frees through
// ZNMethodAllocator are deferred from this point, so the iterated table
// (and memory reachable from it) cannot be freed while workers visit it.
void ZNMethodTable::nmethods_do_begin() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  // Make sure we don't free data while iterating
  ZNMethodAllocator::activate_deferred_frees();

  // Prepare iteration over a snapshot of the current table
  _iteration.nmethods_do_begin(_table, _size);
}
 372 
// Finishes a concurrent iteration, processes any frees deferred while it
// was running, and wakes up threads blocked in wait_until_iteration_done().
void ZNMethodTable::nmethods_do_end() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  // Finish iteration
  _iteration.nmethods_do_end();

  // Process deferred frees
  ZNMethodAllocator::deactivate_and_process_deferred_frees();

  // Notify iteration done
  CodeCache_lock->notify_all();
}
 385 
// Applies cl to all oops of the nmethod: the slots of its oops table
// (skipping non-oop words), the immediate oop locations recorded at
// registration, and — when the nmethod has non-immediate oops — fixes
// its oop relocations.
void ZNMethodTable::oops_do(nmethod* nm, OopClosure* cl) {
  // Process oops table
  oop* const begin = nm->oops_begin();
  oop* const end = nm->oops_end();
  for (oop* p = begin; p < end; p++) {
    if (*p != Universe::non_oop_word()) {
      cl->do_oop(p);
    }
  }

  ZNMethodDataOops* const oops = gc_data(nm)->oops();

  // Process immediate oops
  if (oops->immediates_count() > 0) {
    oop** const begin = oops->immediates_begin();
    oop** const end = oops->immediates_end();
    for (oop** p = begin; p < end; p++) {
      if (**p != Universe::non_oop_word()) {
        cl->do_oop(*p);
      }
    }
  }

  // Process non-immediate oops
  if (oops->has_non_immediates()) {
    nm->fix_oop_relocations();
  }
}
 414 
 415 class ZNMethodToOopsDo : public ZNMethodClosure {
 416 private:
 417   OopClosure* _cl;
 418 
 419 public:
 420   ZNMethodToOopsDo(OopClosure* cl) :
 421       _cl(cl) {}
 422 
 423   void do_nmethod(nmethod* nm) {
 424     ZNMethodTable::oops_do(nm, _cl);
 425   }
 426 };
 427 
// Applies cl to the oops of all nmethods in the current iteration.
void ZNMethodTable::oops_do(OopClosure* cl) {
  ZNMethodToOopsDo nm_cl(cl);
  nmethods_do(&nm_cl);
}
 432 
// Visits all nmethods in the iteration prepared by nmethods_do_begin().
// Called by multiple GC worker threads during the unlink and purge tasks
// below (work distribution is handled by ZNMethodTableIteration).
void ZNMethodTable::nmethods_do(ZNMethodClosure* cl) {
  _iteration.nmethods_do(cl);
}
 436 
// Closure performing the unlink phase of class unloading on each nmethod:
// unloading nmethods have their dependencies flushed and are unlinked from
// their Method; surviving nmethods get their oops healed, their entry
// barrier disarmed, and their IC/exception caches cleaned. Cache cleaning
// can fail when transitional IC stubs run out; _failed is then set, the
// remaining nmethods are skipped, and the whole pass is retried (see
// ZNMethodTable::unlink()). _failed is read/written by multiple workers,
// hence the Atomic accessors.
class ZNMethodTableUnlinkClosure : public ZNMethodClosure {
private:
  bool          _unloading_occurred;
  volatile bool _failed;

  void set_failed() {
    Atomic::store(true, &_failed);
  }

public:
  ZNMethodTableUnlinkClosure(bool unloading_occurred) :
      _unloading_occurred(unloading_occurred),
      _failed(false) {}

  virtual void do_nmethod(nmethod* nm) {
    if (failed()) {
      // A previous nmethod failed to clean; skip the rest of this pass
      return;
    }

    if (!nm->is_alive()) {
      return;
    }

    // Serialize against the entry barrier for this nmethod
    ZLocker<ZReentrantLock> locker(ZNMethodTable::lock_for_nmethod(nm));

    if (nm->is_unloading()) {
      // Unlinking of the dependencies must happen before the
      // handshake separating unlink and purge.
      nm->flush_dependencies(false /* delete_immediately */);

      // We don't need to take the lock when unlinking nmethods from
      // the Method, because it is only concurrently unlinked by
      // the entry barrier, which acquires the per nmethod lock.
      nm->unlink_from_method(false /* acquire_lock */);
      return;
    }

    // Heal oops and disarm
    ZNMethodOopClosure cl;
    ZNMethodTable::oops_do(nm, &cl);
    ZNMethodTable::disarm_nmethod(nm);

    // Clear compiled ICs and exception caches
    if (!nm->unload_nmethod_caches(_unloading_occurred)) {
      set_failed();
    }
  }

  bool failed() const {
    return Atomic::load(&_failed);
  }
};
 489 
// Task running ZNMethodTableUnlinkClosure over all registered nmethods in
// parallel. The constructor/destructor bracket the iteration (and the
// deferred-free window) via nmethods_do_begin()/end().
class ZNMethodTableUnlinkTask : public ZTask {
private:
  ZNMethodTableUnlinkClosure _cl;
  ICRefillVerifier*          _verifier;

public:
  ZNMethodTableUnlinkTask(bool unloading_occurred, ICRefillVerifier* verifier) :
      ZTask("ZNMethodTableUnlinkTask"),
      _cl(unloading_occurred),
      _verifier(verifier) {
    ZNMethodTable::nmethods_do_begin();
  }

  ~ZNMethodTableUnlinkTask() {
    ZNMethodTable::nmethods_do_end();
  }

  virtual void work() {
    // The verifier mark tracks IC transitions made by this worker
    ICRefillVerifierMark mark(_verifier);
    ZNMethodTable::nmethods_do(&_cl);
  }

  // True if no worker ran out of transitional IC stubs
  bool success() const {
    return !_cl.failed();
  }
};
 516 
// Unlink phase of concurrent class unloading. If cleaning fails because
// the supply of transitional IC stubs ran out, the stubs are refilled
// (which requires a safepoint) and the whole pass is retried.
void ZNMethodTable::unlink(ZWorkers* workers, bool unloading_occurred) {
  for (;;) {
    ICRefillVerifier verifier;

    {
      // Scope the task so its destructor ends the iteration before refill
      ZNMethodTableUnlinkTask task(unloading_occurred, &verifier);
      workers->run_concurrent(&task);
      if (task.success()) {
        return;
      }
    }

    // Cleaning failed because we ran out of transitional IC stubs,
    // so we have to refill and try again. Refilling requires taking
    // a safepoint, so we temporarily leave the suspendible thread set.
    SuspendibleThreadSetLeaver sts;
    InlineCacheBuffer::refill_ic_stubs();
  }
}
 536 
 537 class ZNMethodTablePurgeClosure : public ZNMethodClosure {
 538 public:
 539   virtual void do_nmethod(nmethod* nm) {
 540     if (nm->is_alive() && nm->is_unloading()) {
 541       nm->make_unloaded();
 542     }
 543   }
 544 };
 545 
// Task running ZNMethodTablePurgeClosure over all registered nmethods in
// parallel, bracketed by nmethods_do_begin()/end().
class ZNMethodTablePurgeTask : public ZTask {
private:
  ZNMethodTablePurgeClosure _cl;

public:
  ZNMethodTablePurgeTask() :
      ZTask("ZNMethodTablePurgeTask"),
      _cl() {
    ZNMethodTable::nmethods_do_begin();
  }

  ~ZNMethodTablePurgeTask() {
    ZNMethodTable::nmethods_do_end();
  }

  virtual void work() {
    ZNMethodTable::nmethods_do(&_cl);
  }
};
 565 
// Purge phase of concurrent class unloading: makes all unloading
// nmethods unloaded, in parallel.
void ZNMethodTable::purge(ZWorkers* workers) {
  ZNMethodTablePurgeTask task;
  workers->run_concurrent(&task);
}
--- EOF ---