/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/nativeInst.hpp"
#include "code/nmethod.hpp"
#include "code/scopeDesc.hpp"
#include "compiler/abstractCompiler.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/compilerDirectives.hpp"
#include "compiler/directivesParser.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/bytecode.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "prims/jvmtiImpl.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/sweeper.hpp"
#include "utilities/resourceHash.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/xmlstream.hpp"
#include "logging/log.hpp"
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "nativeInst_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "nativeInst_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "nativeInst_ppc.hpp"
#endif
#ifdef SHARK
#include "shark/sharkCompiler.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciJavaClasses.hpp"
#endif

unsigned char nmethod::_global_unloading_clock = 0;

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available

#define DTRACE_METHOD_UNLOAD_PROBE(method)                                \
  {                                                                       \
    Method* m = (method);                                                 \
    if (m != NULL) {                                                      \
      Symbol* klass_name = m->klass_name();                               \
      Symbol* name = m->name();                                           \
      Symbol* signature = m->signature();                                 \
      HOTSPOT_COMPILED_METHOD_UNLOAD(                                     \
        (char *) klass_name->bytes(), klass_name->utf8_length(),          \
        (char *) name->bytes(), name->utf8_length(),                      \
        (char *) signature->bytes(), signature->utf8_length());           \
    }                                                                     \
  }

#else //  ndef DTRACE_ENABLED

#define DTRACE_METHOD_UNLOAD_PROBE(method)

#endif

bool nmethod::is_compiled_by_c1() const {
  if (compiler() == NULL) {
    return false;
  }
  return compiler()->is_c1();
}
bool nmethod::is_compiled_by_jvmci() const {
  if (compiler() == NULL || method() == NULL)  return false;  // can happen during debug printing
  if (is_native_method()) return false;
  return compiler()->is_jvmci();
}
bool nmethod::is_compiled_by_c2() const {
  if (compiler() == NULL) {
    return false;
  }
  return compiler()->is_c2();
}
bool nmethod::is_compiled_by_shark() const {
  if (compiler() == NULL) {
    return false;
  }
  return compiler()->is_shark();
}



//---------------------------------------------------------------------------------
// NMethod statistics
// They are printed under various flags, including:
//   PrintC1Statistics, PrintOptoStatistics, LogVMOutput, and LogCompilation.
// (In the latter two cases, they, like other stats, are printed to the log only.)

#ifndef PRODUCT
// These variables are put into one block to reduce relocations
// and make it simpler to print from the debugger.
struct java_nmethod_stats_struct {
  int nmethod_count;
  int total_size;
  int relocation_size;
  int consts_size;
  int insts_size;
  int stub_size;
  int scopes_data_size;
  int scopes_pcs_size;
  int dependencies_size;
  int handler_table_size;
  int nul_chk_table_size;
  int oops_size;
  int metadata_size;

  void note_nmethod(nmethod* nm) {
    nmethod_count += 1;
    total_size          += nm->size();
    relocation_size     += nm->relocation_size();
    consts_size         += nm->consts_size();
    insts_size          += nm->insts_size();
    stub_size           += nm->stub_size();
    oops_size           += nm->oops_size();
    metadata_size       += nm->metadata_size();
    scopes_data_size    += nm->scopes_data_size();
    scopes_pcs_size     += nm->scopes_pcs_size();
    dependencies_size   += nm->dependencies_size();
    handler_table_size  += nm->handler_table_size();
    nul_chk_table_size  += nm->nul_chk_table_size();
  }
  void print_nmethod_stats(const char* name) {
    if (nmethod_count == 0)  return;
    tty->print_cr("Statistics for %d bytecoded nmethods for %s:", nmethod_count, name);
    if (total_size != 0)          tty->print_cr(" total in heap  = %d", total_size);
    if (nmethod_count != 0)       tty->print_cr(" header         = " SIZE_FORMAT, nmethod_count * sizeof(nmethod));
    if (relocation_size != 0)     tty->print_cr(" relocation     = %d", relocation_size);
    if (consts_size != 0)         tty->print_cr(" constants      = %d", consts_size);
    if (insts_size != 0)          tty->print_cr(" main code      = %d", insts_size);
    if (stub_size != 0)           tty->print_cr(" stub code      = %d", stub_size);
    if (oops_size != 0)           tty->print_cr(" oops           = %d", oops_size);
    if (metadata_size != 0)       tty->print_cr(" metadata       = %d", metadata_size);
    if (scopes_data_size != 0)    tty->print_cr(" scopes data    = %d", scopes_data_size);
    if (scopes_pcs_size != 0)     tty->print_cr(" scopes pcs     = %d", scopes_pcs_size);
    if (dependencies_size != 0)   tty->print_cr(" dependencies   = %d", dependencies_size);
    if (handler_table_size != 0)  tty->print_cr(" handler table  = %d", handler_table_size);
    if (nul_chk_table_size != 0)  tty->print_cr(" nul chk table  = %d", nul_chk_table_size);
  }
};

struct native_nmethod_stats_struct {
  int native_nmethod_count;
  int native_total_size;
  int native_relocation_size;
  int native_insts_size;
  int native_oops_size;
  int native_metadata_size;
  void note_native_nmethod(nmethod* nm) {
    native_nmethod_count += 1;
    native_total_size       += nm->size();
    native_relocation_size  += nm->relocation_size();
    native_insts_size       += nm->insts_size();
    native_oops_size        += nm->oops_size();
    native_metadata_size    += nm->metadata_size();
  }
  void print_native_nmethod_stats() {
    if (native_nmethod_count == 0)  return;
    tty->print_cr("Statistics for %d native nmethods:", native_nmethod_count);
    if (native_total_size != 0)       tty->print_cr(" N. total size  = %d", native_total_size);
    if (native_relocation_size != 0)  tty->print_cr(" N. relocation  = %d", native_relocation_size);
    if (native_insts_size != 0)       tty->print_cr(" N. main code   = %d", native_insts_size);
    if (native_oops_size != 0)        tty->print_cr(" N. oops        = %d", native_oops_size);
    if (native_metadata_size != 0)    tty->print_cr(" N. metadata    = %d", native_metadata_size);
  }
};

struct pc_nmethod_stats_struct {
  int pc_desc_resets;   // number of resets (= number of caches)
  int pc_desc_queries;  // queries to nmethod::find_pc_desc
  int pc_desc_approx;   // number of those which have approximate true
  int pc_desc_repeats;  // number of _pc_descs[0] hits
  int pc_desc_hits;     // number of LRU cache hits
  int pc_desc_tests;    // total number of PcDesc examinations
  int pc_desc_searches; // total number of quasi-binary search steps
  int pc_desc_adds;     // number of LRU cache insertions

  void print_pc_stats() {
    tty->print_cr("PcDesc Statistics:  %d queries, %.2f comparisons per query",
                  pc_desc_queries,
                  (double)(pc_desc_tests + pc_desc_searches)
                  / pc_desc_queries);
    tty->print_cr("  caches=%d queries=%d/%d, hits=%d+%d, tests=%d+%d, adds=%d",
                  pc_desc_resets,
                  pc_desc_queries, pc_desc_approx,
                  pc_desc_repeats, pc_desc_hits,
                  pc_desc_tests, pc_desc_searches, pc_desc_adds);
  }
};

#ifdef COMPILER1
static java_nmethod_stats_struct c1_java_nmethod_stats;
#endif
#ifdef COMPILER2
static java_nmethod_stats_struct c2_java_nmethod_stats;
#endif
#if INCLUDE_JVMCI
static java_nmethod_stats_struct jvmci_java_nmethod_stats;
#endif
#ifdef SHARK
static java_nmethod_stats_struct shark_java_nmethod_stats;
#endif
static java_nmethod_stats_struct unknown_java_nmethod_stats;

static native_nmethod_stats_struct native_nmethod_stats;
static pc_nmethod_stats_struct pc_nmethod_stats;

static void note_java_nmethod(nmethod* nm) {
#ifdef COMPILER1
  if (nm->is_compiled_by_c1()) {
    c1_java_nmethod_stats.note_nmethod(nm);
  } else
#endif
#ifdef COMPILER2
  if (nm->is_compiled_by_c2()) {
    c2_java_nmethod_stats.note_nmethod(nm);
  } else
#endif
#if INCLUDE_JVMCI
  if (nm->is_compiled_by_jvmci()) {
    jvmci_java_nmethod_stats.note_nmethod(nm);
  } else
#endif
#ifdef SHARK
  if (nm->is_compiled_by_shark()) {
    shark_java_nmethod_stats.note_nmethod(nm);
  } else
#endif
  {
    unknown_java_nmethod_stats.note_nmethod(nm);
  }
}
#endif // !PRODUCT

//---------------------------------------------------------------------------------


ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) {
  assert(pc != NULL, "Must be non null");
  assert(exception.not_null(), "Must be non null");
  assert(handler != NULL, "Must be non null");

  _count = 0;
  _exception_type = exception->klass();
  _next = NULL;

  add_address_and_handler(pc,handler);
}


address ExceptionCache::match(Handle exception, address pc) {
  assert(pc != NULL,"Must be non null");
  assert(exception.not_null(),"Must be non null");
  if (exception->klass() == exception_type()) {
    return (test_address(pc));
  }

  return NULL;
}


bool ExceptionCache::match_exception_with_space(Handle exception) {
  assert(exception.not_null(),"Must be non null");
  if (exception->klass() == exception_type() && count() < cache_size) {
    return true;
  }
  return false;
}


address ExceptionCache::test_address(address addr) {
  for (int i=0; i<count(); i++) {
    if (pc_at(i) == addr) {
      return handler_at(i);
    }
  }
  return NULL;
}


bool ExceptionCache::add_address_and_handler(address addr, address handler) {
  if (test_address(addr) == handler) return true;

  int index = count();
  if (index < cache_size) {
    set_pc_at(index, addr);
    set_handler_at(index, handler);
    OrderAccess::storestore();
    increment_count();
    return true;
  }
  return false;
}
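
// Note on ordering in add_address_and_handler: readers scan the cache without
// a lock, so a reader that observes the incremented count must also observe
// the pc/handler pair written before it; the storestore barrier enforces that.
// A minimal sketch of the same publication pattern (illustrative only; 'slot'
// and 'published_count' are hypothetical names, not HotSpot code):
//
//   slot[n] = value;              // 1. write the payload
//   OrderAccess::storestore();    // 2. order the payload before the count
//   published_count = n + 1;      // 3. readers seeing n+1 also see slot[n]
//
// A racing reader may miss the newest entry, which is harmless here: a cache
// miss only means the handler is looked up the slow way and re-added.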


// These methods are private and are used to manipulate the
// exception cache directly.
ExceptionCache* nmethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}


//-----------------------------------------------------------------------------


// Helper used by both find_pc_desc methods.
static inline bool match_desc(PcDesc* pc, int pc_offset, bool approximate) {
  NOT_PRODUCT(++pc_nmethod_stats.pc_desc_tests);
  if (!approximate)
    return pc->pc_offset() == pc_offset;
  else
    return (pc-1)->pc_offset() < pc_offset && pc_offset <= pc->pc_offset();
}
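
// Worked example of the approximate match above (illustrative offsets): if
// successive PcDescs have pc_offset() values {-1, 8, 24} and pc_offset == 20,
// an exact query matches nothing, while an approximate query matches the
// PcDesc with pc_offset() == 24, since (pc-1)->pc_offset() == 8 < 20 and
// 20 <= 24. Approximate matching thus selects the first PcDesc at or after
// the requested offset.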

void PcDescCache::reset_to(PcDesc* initial_pc_desc) {
  if (initial_pc_desc == NULL) {
    _pc_descs[0] = NULL; // native method; no PcDescs at all
    return;
  }
  NOT_PRODUCT(++pc_nmethod_stats.pc_desc_resets);
  // reset the cache by filling it with benign (non-null) values
  assert(initial_pc_desc->pc_offset() < 0, "must be sentinel");
  for (int i = 0; i < cache_size; i++)
    _pc_descs[i] = initial_pc_desc;
}

PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {
  NOT_PRODUCT(++pc_nmethod_stats.pc_desc_queries);
  NOT_PRODUCT(if (approximate) ++pc_nmethod_stats.pc_desc_approx);

  // Note: one might think that caching the most recently
  // read value separately would be a win, but one would be
  // wrong.  When many threads are updating it, the cache
  // line it's in would bounce between caches, negating
  // any benefit.

  // In order to prevent race conditions do not load cache elements
  // repeatedly, but use a local copy:
  PcDesc* res;

  // Step one:  Check the most recently added value.
  res = _pc_descs[0];
  if (res == NULL) return NULL;  // native method; no PcDescs at all
  if (match_desc(res, pc_offset, approximate)) {
    NOT_PRODUCT(++pc_nmethod_stats.pc_desc_repeats);
    return res;
  }

  // Step two:  Check the rest of the LRU cache.
  for (int i = 1; i < cache_size; ++i) {
    res = _pc_descs[i];
    if (res->pc_offset() < 0) break;  // optimization: skip empty cache
    if (match_desc(res, pc_offset, approximate)) {
      NOT_PRODUCT(++pc_nmethod_stats.pc_desc_hits);
      return res;
    }
  }

  // Report failure.
  return NULL;
}

void PcDescCache::add_pc_desc(PcDesc* pc_desc) {
  NOT_PRODUCT(++pc_nmethod_stats.pc_desc_adds);
  // Update the LRU cache by shifting pc_desc forward.
  for (int i = 0; i < cache_size; i++)  {
    PcDesc* next = _pc_descs[i];
    _pc_descs[i] = pc_desc;
    pc_desc = next;
  }
}
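
// A hedged sketch of the shift-insert above, on a 4-entry cache holding
// entries A,B,C,D (most- to least-recently added): inserting X yields
// X,A,B,C and D falls off the end. The same loop over plain ints
// (illustrative only, not HotSpot code):
//
//   int cache[4] = {A, B, C, D};
//   int v = X;
//   for (int i = 0; i < 4; i++) { int next = cache[i]; cache[i] = v; v = next; }
//
// The newest entry always lands in _pc_descs[0], which is why find_pc_desc
// checks that slot first.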

// adjust pcs_size so that it is a multiple of both oopSize and
// sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple
// of oopSize, then 2*sizeof(PcDesc) is)
static int adjust_pcs_size(int pcs_size) {
  int nsize = round_to(pcs_size,   oopSize);
  if ((nsize % sizeof(PcDesc)) != 0) {
    nsize = pcs_size + sizeof(PcDesc);
  }
  assert((nsize % oopSize) == 0, "correct alignment");
  return nsize;
}
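
// Worked example for adjust_pcs_size, using hypothetical sizes chosen to
// exercise the adjustment: with oopSize == 8 and sizeof(PcDesc) == 12,
// pcs_size == 36 rounds up to 40, which is not a multiple of 12, so the
// result becomes 36 + 12 == 48, a multiple of both 8 and 12. With typical
// LP64 values, where sizeof(PcDesc) is already a multiple of oopSize, the
// initial rounding satisfies both constraints and no adjustment is needed.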

//-----------------------------------------------------------------------------


void nmethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
  assert(new_entry != NULL,"Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

  if (exception_cache() != NULL) {
    new_entry->set_next(exception_cache());
  }
  set_exception_cache(new_entry);
}

void nmethod::clean_exception_cache(BoolObjectClosure* is_alive) {
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    Klass* ex_klass = curr->exception_type();
    if (ex_klass != NULL && !ex_klass->is_loader_alive(is_alive)) {
      if (prev == NULL) {
        set_exception_cache(next);
      } else {
        prev->set_next(next);
      }
      delete curr;
      // prev stays the same.
    } else {
      prev = curr;
    }

    curr = next;
  }
}
// These are the public access methods for the exception cache.
address nmethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception,pc)) != NULL) {
      return ret_val;
    }
    ec = ec->next();
  }
  return NULL;
}


void nmethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == NULL || !target_entry->add_address_and_handler(pc,handler)) {
    target_entry = new ExceptionCache(exception,pc,handler);
    add_exception_cache_entry(target_entry);
  }
}
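
// Taken together, handler_for_exception_and_pc and
// add_handler_for_exception_and_pc form an optimistic-read/locked-insert
// pair. A hedged usage sketch (exception_obj, fault_pc and compute_handler
// are hypothetical names, not HotSpot code):
//
//   address handler = nm->handler_for_exception_and_pc(exception_obj, fault_pc);
//   if (handler == NULL) {
//     handler = compute_handler(nm, exception_obj, fault_pc); // slow path
//     nm->add_handler_for_exception_and_pc(exception_obj, fault_pc, handler);
//   }
//
// Racing threads may insert duplicate entries; that is tolerated because the
// cache is only an accelerator over the authoritative handler table.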


//-------------end of code for ExceptionCache--------------


int nmethod::total_size() const {
  return
    consts_size()        +
    insts_size()         +
    stub_size()          +
    scopes_data_size()   +
    scopes_pcs_size()    +
    handler_table_size() +
    nul_chk_table_size();
}

const char* nmethod::compile_kind() const {
  if (is_osr_method())     return "osr";
  if (method() != NULL && is_native_method())  return "c2n";
  return NULL;
}

// Fill in default values for various flag fields
void nmethod::init_defaults() {
  _state                      = in_use;
  _unloading_clock            = 0;
  _marked_for_reclamation     = 0;
  _has_flushed_dependencies   = 0;
  _has_unsafe_access          = 0;
  _has_method_handle_invokes  = 0;
  _lazy_critical_native       = 0;
  _has_wide_vectors           = 0;
  _mark_for_deoptimization_status = not_marked;
  _lock_count                 = 0;
  _stack_traversal_mark       = 0;
  _unload_reported            = false; // jvmti state

#ifdef ASSERT
  _oops_are_stale             = false;
#endif

  _oops_do_mark_link       = NULL;
  _jmethod_id              = NULL;
  _osr_link                = NULL;
  if (UseG1GC) {
    _unloading_next        = NULL;
  } else {
    _scavenge_root_link    = NULL;
  }
  _scavenge_root_state     = 0;
  _compiler                = NULL;
#if INCLUDE_RTM_OPT
  _rtm_state               = NoRTM;
#endif
#if INCLUDE_JVMCI
  _jvmci_installed_code   = NULL;
  _speculation_log        = NULL;
#endif
}

nmethod* nmethod::new_native_nmethod(const methodHandle& method,
  int compile_id,
  CodeBuffer *code_buffer,
  int vep_offset,
  int frame_complete,
  int frame_size,
  ByteSize basic_lock_owner_sp_offset,
  ByteSize basic_lock_sp_offset,
  OopMapSet* oop_maps) {
  code_buffer->finalize_oop_references(method);
  // create nmethod
  nmethod* nm = NULL;
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
    CodeOffsets offsets;
    offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
    offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
    nm = new (native_nmethod_size, CompLevel_none) nmethod(method(), native_nmethod_size,
                                            compile_id, &offsets,
                                            code_buffer, frame_size,
                                            basic_lock_owner_sp_offset,
                                            basic_lock_sp_offset, oop_maps);
    NOT_PRODUCT(if (nm != NULL)  native_nmethod_stats.note_native_nmethod(nm));
  }
  // verify nmethod
  debug_only(if (nm) nm->verify();) // might block

  if (nm != NULL) {
    nm->log_new_nmethod();
  }

  return nm;
}

nmethod* nmethod::new_nmethod(const methodHandle& method,
  int compile_id,
  int entry_bci,
  CodeOffsets* offsets,
  int orig_pc_offset,
  DebugInformationRecorder* debug_info,
  Dependencies* dependencies,
  CodeBuffer* code_buffer, int frame_size,
  OopMapSet* oop_maps,
  ExceptionHandlerTable* handler_table,
  ImplicitExceptionTable* nul_chk_table,
  AbstractCompiler* compiler,
  int comp_level
#if INCLUDE_JVMCI
  , Handle installed_code,
  Handle speculationLog
#endif
)
{
  assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
  code_buffer->finalize_oop_references(method);
  // create nmethod
  nmethod* nm = NULL;
  { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    int nmethod_size =
      allocation_size(code_buffer, sizeof(nmethod))
      + adjust_pcs_size(debug_info->pcs_size())
      + round_to(dependencies->size_in_bytes() , oopSize)
      + round_to(handler_table->size_in_bytes(), oopSize)
      + round_to(nul_chk_table->size_in_bytes(), oopSize)
      + round_to(debug_info->data_size()       , oopSize);

    nm = new (nmethod_size, comp_level)
    nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
            orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
            oop_maps,
            handler_table,
            nul_chk_table,
            compiler,
            comp_level
#if INCLUDE_JVMCI
            , installed_code,
            speculationLog
#endif
            );

    if (nm != NULL) {
      // To make dependency checking during class loading fast, record
      // the nmethod dependencies in the classes it is dependent on.
      // This allows the dependency checking code to simply walk the
      // class hierarchy above the loaded class, checking only nmethods
      // which are dependent on those classes.  The slow way is to
      // check every nmethod for dependencies which makes it linear in
      // the number of methods compiled.  For applications with a lot of
      // classes the slow way is too slow.
      for (Dependencies::DepStream deps(nm); deps.next(); ) {
        if (deps.type() == Dependencies::call_site_target_value) {
          // CallSite dependencies are managed on per-CallSite instance basis.
          oop call_site = deps.argument_oop(0);
          MethodHandles::add_dependent_nmethod(call_site, nm);
        } else {
          Klass* klass = deps.context_type();
          if (klass == NULL) {
            continue;  // ignore things like evol_method
          }
          // record this nmethod as dependent on this klass
          InstanceKlass::cast(klass)->add_dependent_nmethod(nm);
        }
      }
      NOT_PRODUCT(if (nm != NULL)  note_java_nmethod(nm));
    }
  }
  // Do verification and logging outside CodeCache_lock.
  if (nm != NULL) {
    // Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet.
    DEBUG_ONLY(nm->verify();)
    nm->log_new_nmethod();
  }
  return nm;
}

// For native wrappers
nmethod::nmethod(
  Method* method,
  int nmethod_size,
  int compile_id,
  CodeOffsets* offsets,
  CodeBuffer* code_buffer,
  int frame_size,
  ByteSize basic_lock_owner_sp_offset,
  ByteSize basic_lock_sp_offset,
  OopMapSet* oop_maps )
  : CodeBlob("native nmethod", code_buffer, sizeof(nmethod),
             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),
  _native_receiver_sp_offset(basic_lock_owner_sp_offset),
  _native_basic_lock_sp_offset(basic_lock_sp_offset)
{
  {
    debug_only(NoSafepointVerifier nsv;)
    assert_locked_or_safepoint(CodeCache_lock);

    init_defaults();
    _method                  = method;
    _entry_bci               = InvocationEntryBci;
    // We have no exception handler or deopt handler; make the
    // values something that will never match a pc, like the nmethod vtable entry
    _exception_offset        = 0;
    _deoptimize_offset       = 0;
    _deoptimize_mh_offset    = 0;
    _orig_pc_offset          = 0;

    _consts_offset           = data_offset();
    _stub_offset             = data_offset();
    _oops_offset             = data_offset();
    _metadata_offset         = _oops_offset         + round_to(code_buffer->total_oop_size(), oopSize);
    _scopes_data_offset      = _metadata_offset     + round_to(code_buffer->total_metadata_size(), wordSize);
    _scopes_pcs_offset       = _scopes_data_offset;
    _dependencies_offset     = _scopes_pcs_offset;
    _handler_table_offset    = _dependencies_offset;
    _nul_chk_table_offset    = _handler_table_offset;
    _nmethod_end_offset      = _nul_chk_table_offset;
    _compile_id              = compile_id;
    _comp_level              = CompLevel_none;
    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
    _osr_entry_point         = NULL;
    _exception_cache         = NULL;
    _pc_desc_cache.reset_to(NULL);
    _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();

    code_buffer->copy_values_to(this);
    if (ScavengeRootsInCode) {
      if (detect_scavenge_root_oops()) {
        CodeCache::add_scavenge_root_nmethod(this);
      }
      Universe::heap()->register_nmethod(this);
    }
    debug_only(verify_scavenge_root_oops());
    CodeCache::commit(this);
  }

  if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
    ttyLocker ttyl;  // keep the following output all in one block
    // This output goes directly to the tty, not the compiler log.
    // To enable tools to match it up with the compilation activity,
    // be sure to tag this tty output with the compile ID.
    if (xtty != NULL) {
      xtty->begin_head("print_native_nmethod");
      xtty->method(_method);
      xtty->stamp();
      xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
    }
    // print the header part first
    print();
    // then print the requested information
    if (PrintNativeNMethods) {
      print_code();
      if (oop_maps != NULL) {
        oop_maps->print();
      }
    }
    if (PrintRelocations) {
      print_relocations();
    }
    if (xtty != NULL) {
      xtty->tail("print_native_nmethod");
    }
  }
}

void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () {
  return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level));
}

nmethod::nmethod(
  Method* method,
  int nmethod_size,
  int compile_id,
  int entry_bci,
  CodeOffsets* offsets,
  int orig_pc_offset,
  DebugInformationRecorder* debug_info,
  Dependencies* dependencies,
  CodeBuffer *code_buffer,
  int frame_size,
  OopMapSet* oop_maps,
  ExceptionHandlerTable* handler_table,
  ImplicitExceptionTable* nul_chk_table,
  AbstractCompiler* compiler,
  int comp_level
#if INCLUDE_JVMCI
  , Handle installed_code,
  Handle speculation_log
#endif
  )
  : CodeBlob("nmethod", code_buffer, sizeof(nmethod),
             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),
  _native_receiver_sp_offset(in_ByteSize(-1)),
  _native_basic_lock_sp_offset(in_ByteSize(-1))
{
  assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
  {
    debug_only(NoSafepointVerifier nsv;)
    assert_locked_or_safepoint(CodeCache_lock);

    init_defaults();
    _method                  = method;
    _entry_bci               = entry_bci;
    _compile_id              = compile_id;
    _comp_level              = comp_level;
    _compiler                = compiler;
    _orig_pc_offset          = orig_pc_offset;
    _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();

    // Section offsets
    _consts_offset           = content_offset()      + code_buffer->total_offset_of(code_buffer->consts());
    _stub_offset             = content_offset()      + code_buffer->total_offset_of(code_buffer->stubs());

#if INCLUDE_JVMCI
    _jvmci_installed_code = installed_code();
    _speculation_log = (instanceOop)speculation_log();

    if (compiler->is_jvmci()) {
      // JVMCI might not produce any stub sections
      if (offsets->value(CodeOffsets::Exceptions) != -1) {
        _exception_offset        = code_offset()          + offsets->value(CodeOffsets::Exceptions);
      } else {
        _exception_offset = -1;
      }
      if (offsets->value(CodeOffsets::Deopt) != -1) {
        _deoptimize_offset       = code_offset()          + offsets->value(CodeOffsets::Deopt);
      } else {
        _deoptimize_offset = -1;
      }
      if (offsets->value(CodeOffsets::DeoptMH) != -1) {
        _deoptimize_mh_offset  = code_offset()          + offsets->value(CodeOffsets::DeoptMH);
      } else {
        _deoptimize_mh_offset  = -1;
      }
    } else {
#endif
    // Exception handler and deopt handler are in the stub section
    assert(offsets->value(CodeOffsets::Exceptions) != -1, "must be set");
    assert(offsets->value(CodeOffsets::Deopt     ) != -1, "must be set");

    _exception_offset        = _stub_offset          + offsets->value(CodeOffsets::Exceptions);
    _deoptimize_offset       = _stub_offset          + offsets->value(CodeOffsets::Deopt);
    if (offsets->value(CodeOffsets::DeoptMH) != -1) {
      _deoptimize_mh_offset  = _stub_offset          + offsets->value(CodeOffsets::DeoptMH);
    } else {
      _deoptimize_mh_offset  = -1;
#if INCLUDE_JVMCI
    }
#endif
    }
    if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
      _unwind_handler_offset = code_offset()         + offsets->value(CodeOffsets::UnwindHandler);
    } else {
      _unwind_handler_offset = -1;
    }

    _oops_offset             = data_offset();
    _metadata_offset         = _oops_offset          + round_to(code_buffer->total_oop_size(), oopSize);
    _scopes_data_offset      = _metadata_offset      + round_to(code_buffer->total_metadata_size(), wordSize);

    _scopes_pcs_offset       = _scopes_data_offset   + round_to(debug_info->data_size       (), oopSize);
    _dependencies_offset     = _scopes_pcs_offset    + adjust_pcs_size(debug_info->pcs_size());
    _handler_table_offset    = _dependencies_offset  + round_to(dependencies->size_in_bytes (), oopSize);
    _nul_chk_table_offset    = _handler_table_offset + round_to(handler_table->size_in_bytes(), oopSize);
    _nmethod_end_offset      = _nul_chk_table_offset + round_to(nul_chk_table->size_in_bytes(), oopSize);

    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
    _osr_entry_point         = code_begin()          + offsets->value(CodeOffsets::OSR_Entry);
    _exception_cache         = NULL;
    _pc_desc_cache.reset_to(scopes_pcs_begin());

    // Copy contents of ScopeDescRecorder to nmethod
    code_buffer->copy_values_to(this);
    debug_info->copy_to(this);
    dependencies->copy_to(this);
    if (ScavengeRootsInCode) {
      if (detect_scavenge_root_oops()) {
        CodeCache::add_scavenge_root_nmethod(this);
      }
      Universe::heap()->register_nmethod(this);
    }
    debug_only(verify_scavenge_root_oops());

    CodeCache::commit(this);

    // Copy contents of ExceptionHandlerTable to nmethod
    handler_table->copy_to(this);
    nul_chk_table->copy_to(this);

    // We use the entry point information to determine whether the method is
    // static or non-static.
    assert(compiler->is_c2() || compiler->is_jvmci() ||
           _method->is_static() == (entry_point() == _verified_entry_point),
           " entry points must be the same for static methods and vice versa");
  }
}

// Print a short set of xml attributes to identify this nmethod.  The
// output should be embedded in some other element.
void nmethod::log_identity(xmlStream* log) const {
  log->print(" compile_id='%d'", compile_id());
  const char* nm_kind = compile_kind();
  if (nm_kind != NULL)  log->print(" compile_kind='%s'", nm_kind);
  if (compiler() != NULL) {
    log->print(" compiler='%s'", compiler()->name());
  }
  if (TieredCompilation) {
    log->print(" level='%d'", comp_level());
  }
}


#define LOG_OFFSET(log, name)                    \
  if (p2i(name##_end()) - p2i(name##_begin())) \
    log->print(" " XSTR(name) "_offset='" INTX_FORMAT "'"    , \
               p2i(name##_begin()) - p2i(this))


void nmethod::log_new_nmethod() const {
  if (LogCompilation && xtty != NULL) {
    ttyLocker ttyl;
    HandleMark hm;
    xtty->begin_elem("nmethod");
    log_identity(xtty);
    xtty->print(" entry='" INTPTR_FORMAT "' size='%d'", p2i(code_begin()), size());
    xtty->print(" address='" INTPTR_FORMAT "'", p2i(this));

    LOG_OFFSET(xtty, relocation);
    LOG_OFFSET(xtty, consts);
    LOG_OFFSET(xtty, insts);
    LOG_OFFSET(xtty, stub);
    LOG_OFFSET(xtty, scopes_data);
    LOG_OFFSET(xtty, scopes_pcs);
    LOG_OFFSET(xtty, dependencies);
    LOG_OFFSET(xtty, handler_table);
    LOG_OFFSET(xtty, nul_chk_table);
    LOG_OFFSET(xtty, oops);
    LOG_OFFSET(xtty, metadata);

    xtty->method(method());
    xtty->stamp();
    xtty->end_elem();
  }
}

#undef LOG_OFFSET


// Print out more verbose output usually for a newly created nmethod.
void nmethod::print_on(outputStream* st, const char* msg) const {
  if (st != NULL) {
    ttyLocker ttyl;
    if (WizardMode) {
      CompileTask::print(st, this, msg, /*short_form:*/ true);
      st->print_cr(" (" INTPTR_FORMAT ")", p2i(this));
    } else {
      CompileTask::print(st, this, msg, /*short_form:*/ false);
    }
  }
}

void nmethod::maybe_print_nmethod(DirectiveSet* directive) {
  bool printnmethods = directive->PrintAssemblyOption || directive->PrintNMethodsOption;
  if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) {
    print_nmethod(printnmethods);
  }
}

void nmethod::print_nmethod(bool printmethod) {
  ttyLocker ttyl;  // keep the following output all in one block
  if (xtty != NULL) {
    xtty->begin_head("print_nmethod");
    xtty->stamp();
    xtty->end_head();
  }
  // print the header part first
  print();
  // then print the requested information
  if (printmethod) {
    print_code();
    print_pcs();
    if (oop_maps()) {
      oop_maps()->print();
    }
  }
  if (printmethod || PrintDebugInfo || CompilerOracle::has_option_string(_method, "PrintDebugInfo")) {
    print_scopes();
  }
  if (printmethod || PrintRelocations || CompilerOracle::has_option_string(_method, "PrintRelocations")) {
    print_relocations();
  }
  if (printmethod || PrintDependencies || CompilerOracle::has_option_string(_method, "PrintDependencies")) {
    print_dependencies();
  }
  if (printmethod || PrintExceptionHandlers) {
    print_handler_table();
    print_nul_chk_table();
  }
  if (printmethod) {
    print_recorded_oops();
    print_recorded_metadata();
  }
  if (xtty != NULL) {
    xtty->tail("print_nmethod");
  }
}


// Promote one word from an assembly-time handle to a live embedded oop.
inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {
  if (handle == NULL ||
      // As a special case, IC oops are initialized to 1 or -1.
      handle == (jobject) Universe::non_oop_word()) {
    (*dest) = (oop) handle;
  } else {
    (*dest) = JNIHandles::resolve_non_null(handle);
  }
}


// Have to have the same name because it's called by a template
void nmethod::copy_values(GrowableArray<jobject>* array) {
  int length = array->length();
  assert((address)(oops_begin() + length) <= (address)oops_end(), "oops big enough");
  oop* dest = oops_begin();
  for (int index = 0 ; index < length; index++) {
    initialize_immediate_oop(&dest[index], array->at(index));
  }

  // Now we can fix up all the oops in the code.  We need to do this
  // in the code because the assembler uses jobjects as placeholders.
  // The code and relocations have already been initialized by the
  // CodeBlob constructor, so it is valid even at this early point to
  // iterate over relocations and patch the code.
  fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true);
}

void nmethod::copy_values(GrowableArray<Metadata*>* array) {
  int length = array->length();
  assert((address)(metadata_begin() + length) <= (address)metadata_end(), "big enough");
  Metadata** dest = metadata_begin();
  for (int index = 0 ; index < length; index++) {
    dest[index] = array->at(index);
  }
}

bool nmethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}


bool nmethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}


void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {
  // re-patch all oop-bearing instructions, just in case some oops moved
  RelocIterator iter(this, begin, end);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (initialize_immediates && reloc->oop_is_immediate()) {
        oop* dest = reloc->oop_addr();
        initialize_immediate_oop(dest, (jobject) *dest);
      }
      // Refresh the oop-related bits of this instruction.
      reloc->fix_oop_relocation();
    } else if (iter.type() == relocInfo::metadata_type) {
      metadata_Relocation* reloc = iter.metadata_reloc();
      reloc->fix_metadata_relocation();
    }
  }
}


void nmethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
  RelocIterator iter(this, NULL, NULL);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}


ScopeDesc* nmethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}


void nmethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
  if (is_zombie()) {
    return;
  }

  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}

// Clear ICStubs of all compiled ICs
void nmethod::clear_ic_stubs() {
  assert_locked_or_safepoint(CompiledIC_lock);
  RelocIterator iter(this);
  while(iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(&iter);
      ic->clear_ic_stub();
    }
  }
}


void nmethod::cleanup_inline_caches() {
  assert_locked_or_safepoint(CompiledIC_lock);

  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (!is_in_use()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }

  // Find all calls in an nmethod and clear the ones that point to non-entrant,
  // zombie and unloaded nmethods.
  ResourceMark rm;
  RelocIterator iter(this, low_boundary);
  while(iter.next()) {
    switch(iter.type()) {
      case relocInfo::virtual_call_type:
      case relocInfo::opt_virtual_call_type: {
        CompiledIC *ic = CompiledIC_at(&iter);
        // OK to look up references to zombies here
        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
        if( cb != NULL && cb->is_nmethod() ) {
          nmethod* nm = (nmethod*)cb;
          // Clean inline caches pointing to zombie, non-entrant and unloaded methods
          if (!nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean(is_alive());
        }
        break;
      }
      case relocInfo::static_call_type: {
        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
        if( cb != NULL && cb->is_nmethod() ) {
          nmethod* nm = (nmethod*)cb;
          // Clean inline caches pointing to zombie, non-entrant and unloaded methods
          if (!nm->is_in_use() || (nm->method()->code() != nm)) csc->set_to_clean();
        }
        break;
      }
    }
  }
}

void nmethod::verify_clean_inline_caches() {
  assert_locked_or_safepoint(CompiledIC_lock);

  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (!is_in_use()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }

  ResourceMark rm;
  RelocIterator iter(this, low_boundary);
  while(iter.next()) {
    switch(iter.type()) {
      case relocInfo::virtual_call_type:
      case relocInfo::opt_virtual_call_type: {
        CompiledIC *ic = CompiledIC_at(&iter);
        // OK to look up references to zombies here
        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
        if( cb != NULL && cb->is_nmethod() ) {
          nmethod* nm = (nmethod*)cb;
          // Verify that inline caches pointing to both zombie and not_entrant methods are clean
          if (!nm->is_in_use() || (nm->method()->code() != nm)) {
            assert(ic->is_clean(), "IC should be clean");
          }
        }
        break;
      }
      case relocInfo::static_call_type: {
        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
        if( cb != NULL && cb->is_nmethod() ) {
          nmethod* nm = (nmethod*)cb;
          // Verify that inline caches pointing to both zombie and not_entrant methods are clean
          if (!nm->is_in_use() || (nm->method()->code() != nm)) {
            assert(csc->is_clean(), "IC should be clean");
          }
        }
        break;
      }
    }
  }
}

int nmethod::verify_icholder_relocations() {
  int count = 0;

  RelocIterator iter(this);
  while(iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
        CompiledIC *ic = CompiledIC_at(&iter);
        if (TraceCompiledIC) {
          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
          ic->print();
        }
        assert(ic->cached_icholder() != NULL, "must be non-NULL");
        count++;
      }
    }
  }

  return count;
}

// This is a private interface with the sweeper.
void nmethod::mark_as_seen_on_stack() {
  assert(is_alive(), "Must be an alive method");
  // Set the traversal mark to ensure that the sweeper does 2
  // cleaning passes before moving to zombie.
  set_stack_traversal_mark(NMethodSweeper::traversal_count());
}

// Tell if a non-entrant method can be converted to a zombie (i.e.,
// there are no activations on the stack, not in use by the VM,
// and not in use by the ServiceThread)
bool nmethod::can_convert_to_zombie() {
  assert(is_not_entrant(), "must be a non-entrant method");

  // Since the nmethod sweeper only does a partial sweep, the sweeper's traversal
  // count can be greater than the stack traversal count before it hits the
  // nmethod for the second time.
  return stack_traversal_mark()+1 < NMethodSweeper::traversal_count() &&
         !is_locked_by_vm();
}
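
// Worked example of the guard above: if this nmethod was marked during
// sweeper traversal 10 (stack_traversal_mark() == 10), the condition
// 10 + 1 < traversal_count() only becomes true once traversal_count()
// reaches 12, i.e. after the sweeper has begun at least two new traversals
// since the mark was set, so a complete scan has found no activations.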

void nmethod::inc_decompile_count() {
  if (!is_compiled_by_c2() && !is_compiled_by_jvmci()) return;
  // Could be gated by ProfileTraps, but do not bother...
  Method* m = method();
  if (m == NULL)  return;
  MethodData* mdo = m->method_data();
  if (mdo == NULL)  return;
  // There is a benign race here.  See comments in methodData.hpp.
  mdo->inc_decompile_count();
}

void nmethod::increase_unloading_clock() {
  _global_unloading_clock++;
  if (_global_unloading_clock == 0) {
    // _nmethods are allocated with _unloading_clock == 0,
    // so 0 is never used as a clock value.
    _global_unloading_clock = 1;
  }
}

void nmethod::set_unloading_clock(unsigned char unloading_clock) {
  OrderAccess::release_store((volatile jubyte*)&_unloading_clock, unloading_clock);
}

unsigned char nmethod::unloading_clock() {
  return (unsigned char)OrderAccess::load_acquire((volatile jubyte*)&_unloading_clock);
}
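
// The release_store in set_unloading_clock pairs with the load_acquire in
// unloading_clock(): a thread that reads the new clock value is guaranteed
// to also observe every write performed before the store. The same pairing
// in miniature (illustrative only; 'data' and 'flag' are hypothetical names):
//
//   writer:  data = 42;  OrderAccess::release_store(&flag, 1);
//   reader:  if (OrderAccess::load_acquire(&flag) == 1) { /* sees data == 42 */ }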

void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {

  post_compiled_method_unload();

  // Since this nmethod is being unloaded, make sure that dependencies
  // recorded in instanceKlasses get flushed and pass non-NULL closure to
  // indicate that this work is being done during a GC.
  assert(Universe::heap()->is_gc_active(), "should only be called during gc");
  assert(is_alive != NULL, "Should be non-NULL");
  // A non-NULL is_alive closure indicates that this is being called during GC.
  flush_dependencies(is_alive);

  // Break cycle between nmethod & method
  if (log_is_enabled(Trace, classunload)) {
    outputStream* log = LogHandle(classunload)::trace_stream();
    log->print_cr("making nmethod " INTPTR_FORMAT
                  " unloadable, Method*(" INTPTR_FORMAT
                  "), cause(" INTPTR_FORMAT ")",
                  p2i(this), p2i(_method), p2i(cause));
    if (!Universe::heap()->is_gc_active())
      cause->klass()->print_on(log);
  }
  // Unlink the osr method, so we do not look this up again
  if (is_osr_method()) {
    invalidate_osr_method();
  }
  // If _method is already NULL the Method* is about to be unloaded,
  // so we don't have to break the cycle. Note that it is possible to
  // have the Method* live here, in case we unload the nmethod because
  // it is pointing to some oop (other than the Method*) being unloaded.
  if (_method != NULL) {
    // OSR methods point to the Method*, but the Method* does not
    // point back!
    if (_method->code() == this) {
      _method->clear_code(); // Break a cycle
    }
    _method = NULL;            // Clear the method of this dead nmethod
  }

  // Make the class unloaded - i.e., change state and notify sweeper
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  if (is_in_use()) {
    // Transitioning directly from live to unloaded -- so
    // we need to force a cache clean-up; remember this
    // for later on.
    CodeCache::set_needs_cache_clean(true);
  }

  // Unregister must be done before the state change
  Universe::heap()->unregister_nmethod(this);

  _state = unloaded;

  // Log the unloading.
  log_state_change();

#if INCLUDE_JVMCI
  // The method can only be unloaded after the pointer to the installed code
  // Java wrapper is no longer alive. Here we need to clear out this weak
  // reference to the dead object. Nulling out the reference has to happen
  // after the method is unregistered since the original value may still be
  // tracked by the rset.
  maybe_invalidate_installed_code();
  // Clear these out after the nmethod has been unregistered and any
  // updates to the InstalledCode instance have been performed.
  _jvmci_installed_code = NULL;
  _speculation_log = NULL;
#endif

  // The Method* is gone at this point
  assert(_method == NULL, "Tautology");

  set_osr_link(NULL);
  NMethodSweeper::report_state_change(this);
}

void nmethod::invalidate_osr_method() {
  assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
  // Remove from list of active nmethods
  if (method() != NULL)
    method()->method_holder()->remove_osr_nmethod(this);
}

void nmethod::log_state_change() const {
  if (LogCompilation) {
    if (xtty != NULL) {
      ttyLocker ttyl;  // keep the following output all in one block
      if (_state == unloaded) {
        xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
                         os::current_thread_id());
      } else {
        xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
                         os::current_thread_id(),
                         (_state == zombie ? " zombie='1'" : ""));
      }
      log_identity(xtty);
      xtty->stamp();
      xtty->end_elem();
    }
  }
  if (PrintCompilation && _state != unloaded) {
    print_on(tty, _state == zombie ? "made zombie" : "made not entrant");
  }
}

/**
 * Common functionality for both make_not_entrant and make_zombie
 */
bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
  assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
  assert(!is_zombie(), "should not already be a zombie");

  // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
  nmethodLocker nml(this);
  methodHandle the_method(method());
  NoSafepointVerifier nsv;

  // During patching, depending on the nmethod state, we must notify the GC that
  // code has been unloaded, unregistering it. We cannot do this while
  // holding the Patching_lock because we need to use the CodeCache_lock, which
  // would be prone to deadlocks.
1433   // This flag is used to remember whether we need to later lock and unregister.
1434   bool nmethod_needs_unregister = false;
1435 
1436   {
    // Invalidate the osr nmethod before acquiring the patching lock since
    // both operations acquire leaf locks and we don't want a deadlock.
    // This logic is equivalent to the logic below for patching the
    // verified entry point of regular methods.
    if (is_osr_method()) {
      // This effectively makes the osr nmethod not entrant.
      invalidate_osr_method();
    }
1445 
1446     // Enter critical section.  Does not block for safepoint.
1447     MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
1448 
1449     if (_state == state) {
1450       // another thread already performed this transition so nothing
1451       // to do, but return false to indicate this.
1452       return false;
1453     }
1454 
1455     // The caller can be calling the method statically or through an inline
1456     // cache call.
1457     if (!is_osr_method() && !is_not_entrant()) {
1458       NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
1459                   SharedRuntime::get_handle_wrong_method_stub());
1460     }
1461 
1462     if (is_in_use() && update_recompile_counts()) {
1463       // It's a true state change, so mark the method as decompiled.
1464       // Do it only for transition from alive.
1465       inc_decompile_count();
1466     }
1467 
1468     // If the state is becoming a zombie, signal to unregister the nmethod with
1469     // the heap.
1470     // This nmethod may have already been unloaded during a full GC.
1471     if ((state == zombie) && !is_unloaded()) {
1472       nmethod_needs_unregister = true;
1473     }
1474 
1475     // Must happen before state change. Otherwise we have a race condition in
1476     // nmethod::can_not_entrant_be_converted(). I.e., a method can immediately
1477     // transition its state from 'not_entrant' to 'zombie' without having to wait
1478     // for stack scanning.
1479     if (state == not_entrant) {
1480       mark_as_seen_on_stack();
1481       OrderAccess::storestore();
1482     }
1483 
1484     // Change state
1485     _state = state;
1486 
1487     // Log the transition once
1488     log_state_change();
1489 
1490     // Invalidate while holding the patching lock
1491     JVMCI_ONLY(maybe_invalidate_installed_code());
1492 
1493     // Remove nmethod from method.
1494     // We need to check if both the _code and _from_compiled_code_entry_point
1495     // refer to this nmethod because there is a race in setting these two fields
1496     // in Method* as seen in bugid 4947125.
1497     // If the vep() points to the zombie nmethod, the memory for the nmethod
1498     // could be flushed and the compiler and vtable stubs could still call
1499     // through it.
1500     if (method() != NULL && (method()->code() == this ||
1501                              method()->from_compiled_entry() == verified_entry_point())) {
1502       HandleMark hm;
1503       method()->clear_code();
1504     }
1505   } // leave critical region under Patching_lock
1506 
1507   // When the nmethod becomes zombie it is no longer alive so the
1508   // dependencies must be flushed.  nmethods in the not_entrant
1509   // state will be flushed later when the transition to zombie
1510   // happens or they get unloaded.
1511   if (state == zombie) {
1512     {
      // Flushing dependencies must be done before any possible
1514       // safepoint can sneak in, otherwise the oops used by the
1515       // dependency logic could have become stale.
1516       MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1517       if (nmethod_needs_unregister) {
1518         Universe::heap()->unregister_nmethod(this);
#if INCLUDE_JVMCI
1520         _jvmci_installed_code = NULL;
1521         _speculation_log = NULL;
1522 #endif
1523       }
1524       flush_dependencies(NULL);
1525     }
1526 
    // Zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
    // event and it hasn't already been reported for this nmethod, then
    // report it now. (The event may have been reported earlier if the GC
    // marked it for unloading.) JvmtiDeferredEventQueue support means
    // we no longer go to a safepoint here.
1532     post_compiled_method_unload();
1533 
1534 #ifdef ASSERT
1535     // It's no longer safe to access the oops section since zombie
1536     // nmethods aren't scanned for GC.
1537     _oops_are_stale = true;
1538 #endif
    // The Method may be reclaimed by class unloading now that the
    // nmethod is in zombie state.
1541     set_method(NULL);
1542   } else {
1543     assert(state == not_entrant, "other cases may need to be handled differently");
1544   }
1545 
1546   if (TraceCreateZombies) {
1547     ResourceMark m;
    tty->print_cr("nmethod <" INTPTR_FORMAT "> %s code made %s",
                  p2i(this),
                  this->method() ? this->method()->name_and_sig_as_C_string() : "null",
                  (state == not_entrant) ? "not entrant" : "zombie");
1549   }
1550 
1551   NMethodSweeper::report_state_change(this);
1552   return true;
1553 }
1554 
1555 void nmethod::flush() {
1556   // Note that there are no valid oops in the nmethod anymore.
1557   assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
1558   assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");
1559 
1560   assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
1561   assert_locked_or_safepoint(CodeCache_lock);
1562 
1563   // completely deallocate this method
1564   Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, p2i(this));
1565   if (PrintMethodFlushing) {
1566     tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT
1567                   "/Free CodeCache:" SIZE_FORMAT "Kb",
1568                   _compile_id, p2i(this), CodeCache::blob_count(),
1569                   CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024);
1570   }
1571 
1572   // We need to deallocate any ExceptionCache data.
1573   // Note that we do not need to grab the nmethod lock for this, it
1574   // better be thread safe if we're disposing of it!
1575   ExceptionCache* ec = exception_cache();
1576   set_exception_cache(NULL);
1577   while(ec != NULL) {
1578     ExceptionCache* next = ec->next();
1579     delete ec;
1580     ec = next;
1581   }
1582 
1583   if (on_scavenge_root_list()) {
1584     CodeCache::drop_scavenge_root_nmethod(this);
1585   }
1586 
1587 #ifdef SHARK
1588   ((SharkCompiler *) compiler())->free_compiled_method(insts_begin());
1589 #endif // SHARK
1590 
1591   ((CodeBlob*)(this))->flush();
1592 
1593   CodeCache::free(this);
1594 }
1595 
//
// Notify all classes this nmethod is dependent on that it is no
// longer dependent. This should only be called in two situations.
// First, when an nmethod transitions to a zombie all dependents need
// to be cleared.  Since zombification happens at a safepoint there are no
// synchronization issues.  The second place is a little more tricky.
// During phase 1 of mark sweep class unloading may happen and as a
// result some nmethods may get unloaded.  In this case the flushing
// of dependencies must happen during phase 1 since after GC any
// dependencies in the unloaded nmethod won't be updated, so
// traversing the dependency information is unsafe.  In that case this
// function is called with a non-NULL argument and this function only
// notifies instanceKlasses that are reachable.
1609 
1610 void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
1611   assert_locked_or_safepoint(CodeCache_lock);
  assert(Universe::heap()->is_gc_active() == (is_alive != NULL),
         "is_alive is non-NULL if and only if we are called during GC");
1614   if (!has_flushed_dependencies()) {
1615     set_has_flushed_dependencies();
1616     for (Dependencies::DepStream deps(this); deps.next(); ) {
1617       if (deps.type() == Dependencies::call_site_target_value) {
        // CallSite dependencies are managed on a per-CallSite-instance basis.
1619         oop call_site = deps.argument_oop(0);
1620         MethodHandles::remove_dependent_nmethod(call_site, this);
1621       } else {
1622         Klass* klass = deps.context_type();
1623         if (klass == NULL) {
1624           continue;  // ignore things like evol_method
1625         }
1626         // During GC the is_alive closure is non-NULL, and is used to
1627         // determine liveness of dependees that need to be updated.
1628         if (is_alive == NULL || klass->is_loader_alive(is_alive)) {
1629           // The GC defers deletion of this entry, since there might be multiple threads
1630           // iterating over the _dependencies graph. Other call paths are single-threaded
1631           // and may delete it immediately.
1632           bool delete_immediately = is_alive == NULL;
1633           InstanceKlass::cast(klass)->remove_dependent_nmethod(this, delete_immediately);
1634         }
1635       }
1636     }
1637   }
1638 }
1639 
1640 
1641 // If this oop is not live, the nmethod can be unloaded.
1642 bool nmethod::can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred) {
1643   assert(root != NULL, "just checking");
1644   oop obj = *root;
  if (obj == NULL || is_alive->do_object_b(obj)) {
    return false;
  }
1648 
1649   // If ScavengeRootsInCode is true, an nmethod might be unloaded
1650   // simply because one of its constant oops has gone dead.
1651   // No actual classes need to be unloaded in order for this to occur.
1652   assert(unloading_occurred || ScavengeRootsInCode, "Inconsistency in unloading");
1653   make_unloaded(is_alive, obj);
1654   return true;
1655 }
1656 
1657 // ------------------------------------------------------------------
1658 // post_compiled_method_load_event
1659 // new method for install_code() path
1660 // Transfer information from compilation to jvmti
1661 void nmethod::post_compiled_method_load_event() {
1662 
1663   Method* moop = method();
1664   HOTSPOT_COMPILED_METHOD_LOAD(
1665       (char *) moop->klass_name()->bytes(),
1666       moop->klass_name()->utf8_length(),
1667       (char *) moop->name()->bytes(),
1668       moop->name()->utf8_length(),
1669       (char *) moop->signature()->bytes(),
1670       moop->signature()->utf8_length(),
1671       insts_begin(), insts_size());
1672 
1673   if (JvmtiExport::should_post_compiled_method_load() ||
1674       JvmtiExport::should_post_compiled_method_unload()) {
1675     get_and_cache_jmethod_id();
1676   }
1677 
1678   if (JvmtiExport::should_post_compiled_method_load()) {
1679     // Let the Service thread (which is a real Java thread) post the event
1680     MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
1681     JvmtiDeferredEventQueue::enqueue(
1682       JvmtiDeferredEvent::compiled_method_load_event(this));
1683   }
1684 }
1685 
1686 jmethodID nmethod::get_and_cache_jmethod_id() {
1687   if (_jmethod_id == NULL) {
1688     // Cache the jmethod_id since it can no longer be looked up once the
1689     // method itself has been marked for unloading.
1690     _jmethod_id = method()->jmethod_id();
1691   }
1692   return _jmethod_id;
1693 }
1694 
1695 void nmethod::post_compiled_method_unload() {
1696   if (unload_reported()) {
1697     // During unloading we transition to unloaded and then to zombie
1698     // and the unloading is reported during the first transition.
1699     return;
1700   }
1701 
1702   assert(_method != NULL && !is_unloaded(), "just checking");
1703   DTRACE_METHOD_UNLOAD_PROBE(method());
1704 
1705   // If a JVMTI agent has enabled the CompiledMethodUnload event then
1706   // post the event. Sometime later this nmethod will be made a zombie
1707   // by the sweeper but the Method* will not be valid at that point.
1708   // If the _jmethod_id is null then no load event was ever requested
1709   // so don't bother posting the unload.  The main reason for this is
1710   // that the jmethodID is a weak reference to the Method* so if
1711   // it's being unloaded there's no way to look it up since the weak
1712   // ref will have been cleared.
1713   if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) {
1714     assert(!unload_reported(), "already unloaded");
1715     JvmtiDeferredEvent event =
1716       JvmtiDeferredEvent::compiled_method_unload_event(this,
1717           _jmethod_id, insts_begin());
1718     if (SafepointSynchronize::is_at_safepoint()) {
1719       // Don't want to take the queueing lock. Add it as pending and
1720       // it will get enqueued later.
1721       JvmtiDeferredEventQueue::add_pending_event(event);
1722     } else {
1723       MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
1724       JvmtiDeferredEventQueue::enqueue(event);
1725     }
1726   }
1727 
  // The JVMTI CompiledMethodUnload event can be enabled or disabled at
  // any time. As the nmethod is being unloaded now we mark it as
  // having the unload event reported - this will ensure that we don't
  // attempt to report the event in the unlikely scenario where the
  // event is enabled at the time the nmethod is made a zombie.
1733   set_unload_reported();
1734 }
1735 
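// Clear an inline cache if the metadata it caches belongs to a class loader
// that is no longer alive; live entries are left untouched.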
static void clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive) {
1737   if (ic->is_icholder_call()) {
    // The only exception is compiledICHolder metadata, which may
    // yet be marked alive; we check that just below.
1740     CompiledICHolder* cichk_oop = ic->cached_icholder();
1741 
1742     if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
1743         cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
1744       return;
1745     }
1746   } else {
1747     Metadata* ic_oop = ic->cached_metadata();
1748     if (ic_oop != NULL) {
1749       if (ic_oop->is_klass()) {
1750         if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
1751           return;
1752         }
1753       } else if (ic_oop->is_method()) {
1754         if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
1755           return;
1756         }
1757       } else {
1758         ShouldNotReachHere();
1759       }
1760     }
1761   }
1762 
1763   ic->set_to_clean();
1764 }
1765 
1766 // This is called at the end of the strong tracing/marking phase of a
1767 // GC to unload an nmethod if it contains otherwise unreachable
1768 // oops.
1769 
1770 void nmethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  // Make sure the oops are ready to receive visitors
1772   assert(!is_zombie() && !is_unloaded(),
1773          "should not call follow on zombie or unloaded nmethod");
1774 
1775   // If the method is not entrant then a JMP is plastered over the
1776   // first few bytes.  If an oop in the old code was there, that oop
1777   // should not get GC'd.  Skip the first few bytes of oops on
1778   // not-entrant methods.
1779   address low_boundary = verified_entry_point();
1780   if (is_not_entrant()) {
1781     low_boundary += NativeJump::instruction_size;
1782     // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
1783     // (See comment above.)
1784   }
1785 
1786   // The RedefineClasses() API can cause the class unloading invariant
1787   // to no longer be true. See jvmtiExport.hpp for details.
1788   // Also, leave a debugging breadcrumb in local flag.
1789   if (JvmtiExport::has_redefined_a_class()) {
1790     // This set of the unloading_occurred flag is done before the
1791     // call to post_compiled_method_unload() so that the unloading
1792     // of this nmethod is reported.
1793     unloading_occurred = true;
1794   }
1795 
1796   // Exception cache
1797   clean_exception_cache(is_alive);
1798 
1799   // If class unloading occurred we first iterate over all inline caches and
1800   // clear ICs where the cached oop is referring to an unloaded klass or method.
1801   // The remaining live cached oops will be traversed in the relocInfo::oop_type
1802   // iteration below.
1803   if (unloading_occurred) {
1804     RelocIterator iter(this, low_boundary);
1805     while(iter.next()) {
1806       if (iter.type() == relocInfo::virtual_call_type) {
1807         CompiledIC *ic = CompiledIC_at(&iter);
1808         clean_ic_if_metadata_is_dead(ic, is_alive);
1809       }
1810     }
1811   }
1812 
1813   // Compiled code
  {
    RelocIterator iter(this, low_boundary);
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation* r = iter.oop_reloc();
        // In this loop, we must only traverse those oops directly embedded in
        // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
        assert(1 == (r->oop_is_immediate()) +
                    (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
               "oop must be found in exactly one place");
        if (r->oop_is_immediate() && r->oop_value() != NULL) {
          if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
            return;
          }
        }
      }
    }
  }
1832 
1833 
1834   // Scopes
1835   for (oop* p = oops_begin(); p < oops_end(); p++) {
1836     if (*p == Universe::non_oop_word())  continue;  // skip non-oops
1837     if (can_unload(is_alive, p, unloading_occurred)) {
1838       return;
1839     }
1840   }
1841 
1842 #if INCLUDE_JVMCI
1843   // Follow JVMCI method
1844   BarrierSet* bs = Universe::heap()->barrier_set();
1845   if (_jvmci_installed_code != NULL) {
1846     if (_jvmci_installed_code->is_a(HotSpotNmethod::klass()) && HotSpotNmethod::isDefault(_jvmci_installed_code)) {
1847       if (!is_alive->do_object_b(_jvmci_installed_code)) {
1848         clear_jvmci_installed_code();
1849       }
1850     } else {
1851       if (can_unload(is_alive, (oop*)&_jvmci_installed_code, unloading_occurred)) {
1852         return;
1853       }
1854     }
1855   }
1856 
1857   if (_speculation_log != NULL) {
1858     if (!is_alive->do_object_b(_speculation_log)) {
1859       bs->write_ref_nmethod_pre(&_speculation_log, this);
1860       _speculation_log = NULL;
1861       bs->write_ref_nmethod_post(&_speculation_log, this);
1862     }
1863   }
1864 #endif
1865 
1866 
1867   // Ensure that all metadata is still alive
1868   verify_metadata_loaders(low_boundary, is_alive);
1869 }
1870 
1871 template <class CompiledICorStaticCall>
1872 static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, BoolObjectClosure *is_alive, nmethod* from) {
  // OK to look up references to zombies here.
1874   CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
1875   if (cb != NULL && cb->is_nmethod()) {
1876     nmethod* nm = (nmethod*)cb;
1877 
1878     if (nm->unloading_clock() != nmethod::global_unloading_clock()) {
1879       // The nmethod has not been processed yet.
1880       return true;
1881     }
1882 
1883     // Clean inline caches pointing to both zombie and not_entrant methods
1884     if (!nm->is_in_use() || (nm->method()->code() != nm)) {
1885       ic->set_to_clean();
      assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
1887     }
1888   }
1889 
1890   return false;
1891 }
1892 
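// Convenience overloads that extract the call destination from the inline
// cache or static call before delegating to the template above.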
1893 static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, BoolObjectClosure *is_alive, nmethod* from) {
1894   return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), is_alive, from);
1895 }
1896 
1897 static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, BoolObjectClosure *is_alive, nmethod* from) {
1898   return clean_if_nmethod_is_unloaded(csc, csc->destination(), is_alive, from);
1899 }
1900 
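// Check the immediate oop at an oop-type relocation and unload this nmethod
// if that oop is dead. Returns true if the nmethod was unloaded.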
1901 bool nmethod::unload_if_dead_at(RelocIterator* iter_at_oop, BoolObjectClosure *is_alive, bool unloading_occurred) {
1902   assert(iter_at_oop->type() == relocInfo::oop_type, "Wrong relocation type");
1903 
1904   oop_Relocation* r = iter_at_oop->oop_reloc();
1905   // Traverse those oops directly embedded in the code.
1906   // Other oops (oop_index>0) are seen as part of scopes_oops.
1907   assert(1 == (r->oop_is_immediate()) +
1908          (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
1909          "oop must be found in exactly one place");
1910   if (r->oop_is_immediate() && r->oop_value() != NULL) {
1911     // Unload this nmethod if the oop is dead.
1912     if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
      return true;
1914     }
1915   }
1916 
1917   return false;
1918 }
1919 
1920 
1921 bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
1922   ResourceMark rm;
1923 
  // Make sure the oops are ready to receive visitors
1925   assert(!is_zombie() && !is_unloaded(),
1926          "should not call follow on zombie or unloaded nmethod");
1927 
1928   // If the method is not entrant then a JMP is plastered over the
1929   // first few bytes.  If an oop in the old code was there, that oop
1930   // should not get GC'd.  Skip the first few bytes of oops on
1931   // not-entrant methods.
1932   address low_boundary = verified_entry_point();
1933   if (is_not_entrant()) {
1934     low_boundary += NativeJump::instruction_size;
1935     // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
1936     // (See comment above.)
1937   }
1938 
1939   // The RedefineClasses() API can cause the class unloading invariant
1940   // to no longer be true. See jvmtiExport.hpp for details.
1941   // Also, leave a debugging breadcrumb in local flag.
1942   if (JvmtiExport::has_redefined_a_class()) {
1943     // This set of the unloading_occurred flag is done before the
1944     // call to post_compiled_method_unload() so that the unloading
1945     // of this nmethod is reported.
1946     unloading_occurred = true;
1947   }
1948 
1949   // Exception cache
1950   clean_exception_cache(is_alive);
1951 
1952   bool is_unloaded = false;
1953   bool postponed = false;
1954 
1955   RelocIterator iter(this, low_boundary);
1956   while(iter.next()) {
1957 
1958     switch (iter.type()) {
1959 
1960     case relocInfo::virtual_call_type:
1961       if (unloading_occurred) {
1962         // If class unloading occurred we first iterate over all inline caches and
1963         // clear ICs where the cached oop is referring to an unloaded klass or method.
1964         clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive);
1965       }
1966 
1967       postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
1968       break;
1969 
1970     case relocInfo::opt_virtual_call_type:
1971       postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
1972       break;
1973 
1974     case relocInfo::static_call_type:
1975       postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
1976       break;
1977 
1978     case relocInfo::oop_type:
1979       if (!is_unloaded) {
1980         is_unloaded = unload_if_dead_at(&iter, is_alive, unloading_occurred);
1981       }
1982       break;
1983 
1984     case relocInfo::metadata_type:
1985       break; // nothing to do.
1986     }
1987   }
1988 
1989   if (is_unloaded) {
1990     return postponed;
1991   }
1992 
1993   // Scopes
1994   for (oop* p = oops_begin(); p < oops_end(); p++) {
1995     if (*p == Universe::non_oop_word())  continue;  // skip non-oops
1996     if (can_unload(is_alive, p, unloading_occurred)) {
1997       is_unloaded = true;
1998       break;
1999     }
2000   }
2001 
2002   if (is_unloaded) {
2003     return postponed;
2004   }
2005 
2006 #if INCLUDE_JVMCI
2007   // Follow JVMCI method
2008   BarrierSet* bs = Universe::heap()->barrier_set();
2009   if (_jvmci_installed_code != NULL) {
2010     if (_jvmci_installed_code->is_a(HotSpotNmethod::klass()) && HotSpotNmethod::isDefault(_jvmci_installed_code)) {
2011       if (!is_alive->do_object_b(_jvmci_installed_code)) {
2012         clear_jvmci_installed_code();
2013       }
2014     } else {
2015       if (can_unload(is_alive, (oop*)&_jvmci_installed_code, unloading_occurred)) {
2016         is_unloaded = true;
2017       }
2018     }
2019   }
2020 
2021   if (_speculation_log != NULL) {
2022     if (!is_alive->do_object_b(_speculation_log)) {
2023       bs->write_ref_nmethod_pre(&_speculation_log, this);
2024       _speculation_log = NULL;
2025       bs->write_ref_nmethod_post(&_speculation_log, this);
2026     }
2027   }
2028 #endif
2029 
2030   // Ensure that all metadata is still alive
2031   verify_metadata_loaders(low_boundary, is_alive);
2032 
2033   return postponed;
2034 }
2035 
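// Second pass of parallel unloading: clean inline caches whose cleaning was
// postponed because the target nmethod had not yet been processed (i.e. its
// unloading clock lagged the global unloading clock).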
2036 void nmethod::do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred) {
2037   ResourceMark rm;
2038 
  // Make sure the oops are ready to receive visitors
2040   assert(!is_zombie(),
2041          "should not call follow on zombie nmethod");
2042 
2043   // If the method is not entrant then a JMP is plastered over the
2044   // first few bytes.  If an oop in the old code was there, that oop
2045   // should not get GC'd.  Skip the first few bytes of oops on
2046   // not-entrant methods.
2047   address low_boundary = verified_entry_point();
2048   if (is_not_entrant()) {
2049     low_boundary += NativeJump::instruction_size;
2050     // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
2051     // (See comment above.)
2052   }
2053 
2054   RelocIterator iter(this, low_boundary);
2055   while(iter.next()) {
2056 
2057     switch (iter.type()) {
2058 
2059     case relocInfo::virtual_call_type:
2060       clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
2061       break;
2062 
2063     case relocInfo::opt_virtual_call_type:
2064       clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
2065       break;
2066 
2067     case relocInfo::static_call_type:
2068       clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
2069       break;
2070     }
2071   }
2072 }
2073 
2074 #ifdef ASSERT
2075 
2076 class CheckClass : AllStatic {
2077   static BoolObjectClosure* _is_alive;
2078 
  // Check that the class loader is alive for this bit of metadata.
2080   static void check_class(Metadata* md) {
2081     Klass* klass = NULL;
2082     if (md->is_klass()) {
2083       klass = ((Klass*)md);
2084     } else if (md->is_method()) {
2085       klass = ((Method*)md)->method_holder();
2086     } else if (md->is_methodData()) {
2087       klass = ((MethodData*)md)->method()->method_holder();
2088     } else {
2089       md->print();
2090       ShouldNotReachHere();
2091     }
2092     assert(klass->is_loader_alive(_is_alive), "must be alive");
2093   }
2094  public:
2095   static void do_check_class(BoolObjectClosure* is_alive, nmethod* nm) {
2096     assert(SafepointSynchronize::is_at_safepoint(), "this is only ok at safepoint");
2097     _is_alive = is_alive;
2098     nm->metadata_do(check_class);
2099   }
2100 };
2101 
// This is called during a safepoint, so it can use static data.
2103 BoolObjectClosure* CheckClass::_is_alive = NULL;
2104 #endif // ASSERT
2105 
2106 
2107 // Processing of oop references should have been sufficient to keep
2108 // all strong references alive.  Any weak references should have been
2109 // cleared as well.  Visit all the metadata and ensure that it's
2110 // really alive.
2111 void nmethod::verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive) {
2112 #ifdef ASSERT
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
2115     // static_stub_Relocations may have dangling references to
2116     // Method*s so trim them out here.  Otherwise it looks like
2117     // compiled code is maintaining a link to dead metadata.
2118     address static_call_addr = NULL;
2119     if (iter.type() == relocInfo::opt_virtual_call_type) {
2120       CompiledIC* cic = CompiledIC_at(&iter);
2121       if (!cic->is_call_to_interpreted()) {
2122         static_call_addr = iter.addr();
2123       }
2124     } else if (iter.type() == relocInfo::static_call_type) {
2125       CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
2126       if (!csc->is_call_to_interpreted()) {
2127         static_call_addr = iter.addr();
2128       }
2129     }
2130     if (static_call_addr != NULL) {
2131       RelocIterator sciter(this, low_boundary);
2132       while (sciter.next()) {
2133         if (sciter.type() == relocInfo::static_stub_type &&
2134             sciter.static_stub_reloc()->static_call() == static_call_addr) {
2135           sciter.static_stub_reloc()->clear_inline_cache();
2136         }
2137       }
2138     }
2139   }
2140   // Check that the metadata embedded in the nmethod is alive
2141   CheckClass::do_check_class(is_alive, this);
2142 #endif
2143 }
2144 
2145 
2146 // Iterate over metadata calling this function.   Used by RedefineClasses
2147 void nmethod::metadata_do(void f(Metadata*)) {
2148   address low_boundary = verified_entry_point();
2149   if (is_not_entrant()) {
2150     low_boundary += NativeJump::instruction_size;
2151     // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
2152     // (See comment above.)
2153   }
2154   {
2155     // Visit all immediate references that are embedded in the instruction stream.
2156     RelocIterator iter(this, low_boundary);
2157     while (iter.next()) {
2158       if (iter.type() == relocInfo::metadata_type ) {
2159         metadata_Relocation* r = iter.metadata_reloc();
        // In this loop, we must only follow metadata directly embedded in
        // the code.  Other metadata (oop_index>0) is seen as part of
        // the metadata section below.
2163         assert(1 == (r->metadata_is_immediate()) +
2164                (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
2165                "metadata must be found in exactly one place");
2166         if (r->metadata_is_immediate() && r->metadata_value() != NULL) {
2167           Metadata* md = r->metadata_value();
2168           if (md != _method) f(md);
2169         }
2170       } else if (iter.type() == relocInfo::virtual_call_type) {
2171         // Check compiledIC holders associated with this nmethod
2172         CompiledIC *ic = CompiledIC_at(&iter);
2173         if (ic->is_icholder_call()) {
2174           CompiledICHolder* cichk = ic->cached_icholder();
2175           f(cichk->holder_method());
2176           f(cichk->holder_klass());
2177         } else {
2178           Metadata* ic_oop = ic->cached_metadata();
2179           if (ic_oop != NULL) {
2180             f(ic_oop);
2181           }
2182         }
2183       }
2184     }
2185   }
2186 
2187   // Visit the metadata section
2188   for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
    if (*p == Universe::non_oop_word() || *p == NULL)  continue;  // skip non-metadata words
2190     Metadata* md = *p;
2191     f(md);
2192   }
2193 
2194   // Visit metadata not embedded in the other places.
2195   if (_method != NULL) f(_method);
2196 }
2197 
2198 void nmethod::oops_do(OopClosure* f, bool allow_zombie) {
  // Make sure the oops are ready to receive visitors
2200   assert(allow_zombie || !is_zombie(), "should not call follow on zombie nmethod");
2201   assert(!is_unloaded(), "should not call follow on unloaded nmethod");
2202 
2203   // If the method is not entrant or zombie then a JMP is plastered over the
2204   // first few bytes.  If an oop in the old code was there, that oop
2205   // should not get GC'd.  Skip the first few bytes of oops on
2206   // not-entrant methods.
2207   address low_boundary = verified_entry_point();
2208   if (is_not_entrant()) {
2209     low_boundary += NativeJump::instruction_size;
2210     // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
2211     // (See comment above.)
2212   }
2213 
2214 #if INCLUDE_JVMCI
2215   if (_jvmci_installed_code != NULL) {
2216     f->do_oop((oop*) &_jvmci_installed_code);
2217   }
2218   if (_speculation_log != NULL) {
2219     f->do_oop((oop*) &_speculation_log);
2220   }
2221 #endif
2222 
2223   RelocIterator iter(this, low_boundary);
2224 
2225   while (iter.next()) {
2226     if (iter.type() == relocInfo::oop_type ) {
2227       oop_Relocation* r = iter.oop_reloc();
2228       // In this loop, we must only follow those oops directly embedded in
2229       // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
2230       assert(1 == (r->oop_is_immediate()) +
2231                    (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
2232              "oop must be found in exactly one place");
2233       if (r->oop_is_immediate() && r->oop_value() != NULL) {
2234         f->do_oop(r->oop_addr());
2235       }
2236     }
2237   }
2238 
2239   // Scopes
2240   // This includes oop constants not inlined in the code stream.
2241   for (oop* p = oops_begin(); p < oops_end(); p++) {
2242     if (*p == Universe::non_oop_word())  continue;  // skip non-oops
2243     f->do_oop(p);
2244   }
2245 }
2246 
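// Sentinel used both to claim an nmethod for marking before it is linked
// into the list, and to terminate the _oops_do_mark_nmethods list itself.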
2247 #define NMETHOD_SENTINEL ((nmethod*)badAddress)
2248 
2249 nmethod* volatile nmethod::_oops_do_mark_nmethods;
2250 
// An nmethod is "marked" if its _oops_do_mark_link is set non-null.
2252 // Even if it is the end of the linked list, it will have a non-null link value,
2253 // as long as it is on the list.
2254 // This code must be MP safe, because it is used from parallel GC passes.
2255 bool nmethod::test_set_oops_do_mark() {
2256   assert(nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
2257   nmethod* observed_mark_link = _oops_do_mark_link;
2258   if (observed_mark_link == NULL) {
2259     // Claim this nmethod for this thread to mark.
2260     observed_mark_link = (nmethod*)
2261       Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_link, NULL);
2262     if (observed_mark_link == NULL) {
2263 
2264       // Atomically append this nmethod (now claimed) to the head of the list:
2265       nmethod* observed_mark_nmethods = _oops_do_mark_nmethods;
2266       for (;;) {
2267         nmethod* required_mark_nmethods = observed_mark_nmethods;
2268         _oops_do_mark_link = required_mark_nmethods;
2269         observed_mark_nmethods = (nmethod*)
2270           Atomic::cmpxchg_ptr(this, &_oops_do_mark_nmethods, required_mark_nmethods);
2271         if (observed_mark_nmethods == required_mark_nmethods)
2272           break;
2273       }
2274       // Mark was clear when we first saw this guy.
2275       if (TraceScavenge) { print_on(tty, "oops_do, mark"); }
2276       return false;
2277     }
2278   }
2279   // On fall through, another racing thread marked this nmethod before we did.
2280   return true;
2281 }
2282 
2283 void nmethod::oops_do_marking_prologue() {
2284   if (TraceScavenge) { tty->print_cr("[oops_do_marking_prologue"); }
2285   assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
2286   // We use cmpxchg_ptr instead of regular assignment here because the user
2287   // may fork a bunch of threads, and we need them all to see the same state.
2288   void* observed = Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, NULL);
2289   guarantee(observed == NULL, "no races in this sequential code");
2290 }
2291 
2292 void nmethod::oops_do_marking_epilogue() {
2293   assert(_oops_do_mark_nmethods != NULL, "must not call oops_do_marking_epilogue twice in a row");
2294   nmethod* cur = _oops_do_mark_nmethods;
2295   while (cur != NMETHOD_SENTINEL) {
2296     assert(cur != NULL, "not NULL-terminated");
2297     nmethod* next = cur->_oops_do_mark_link;
2298     cur->_oops_do_mark_link = NULL;
2299     DEBUG_ONLY(cur->verify_oop_relocations());
2300     NOT_PRODUCT(if (TraceScavenge)  cur->print_on(tty, "oops_do, unmark"));
2301     cur = next;
2302   }
2303   void* required = _oops_do_mark_nmethods;
2304   void* observed = Atomic::cmpxchg_ptr(NULL, &_oops_do_mark_nmethods, required);
2305   guarantee(observed == required, "no races in this sequential code");
2306   if (TraceScavenge) { tty->print_cr("oops_do_marking_epilogue]"); }
2307 }
2308 
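// Closure used by detect_scavenge_root_oops() below; it flags (and, in
// non-product builds, optionally prints) any scavengable oop it visits.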
2309 class DetectScavengeRoot: public OopClosure {
2310   bool     _detected_scavenge_root;
2311 public:
2312   DetectScavengeRoot() : _detected_scavenge_root(false)
2313   { NOT_PRODUCT(_print_nm = NULL); }
2314   bool detected_scavenge_root() { return _detected_scavenge_root; }
2315   virtual void do_oop(oop* p) {
2316     if ((*p) != NULL && (*p)->is_scavengable()) {
2317       NOT_PRODUCT(maybe_print(p));
2318       _detected_scavenge_root = true;
2319     }
2320   }
2321   virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2322 
2323 #ifndef PRODUCT
2324   nmethod* _print_nm;
2325   void maybe_print(oop* p) {
2326     if (_print_nm == NULL)  return;
2327     if (!_detected_scavenge_root)  _print_nm->print_on(tty, "new scavenge root");
2328     tty->print_cr("" PTR_FORMAT "[offset=%d] detected scavengable oop " PTR_FORMAT " (found at " PTR_FORMAT ")",
2329                   p2i(_print_nm), (int)((intptr_t)p - (intptr_t)_print_nm),
2330                   p2i(*p), p2i(p));
2331     (*p)->print();
2332   }
2333 #endif //PRODUCT
2334 };
2335 
2336 bool nmethod::detect_scavenge_root_oops() {
2337   DetectScavengeRoot detect_scavenge_root;
2338   NOT_PRODUCT(if (TraceScavenge)  detect_scavenge_root._print_nm = this);
2339   oops_do(&detect_scavenge_root);
2340   return detect_scavenge_root.detected_scavenge_root();
2341 }
2342 
2343 // Method that knows how to preserve outgoing arguments at call. This method must be
2344 // called with a frame corresponding to a Java invoke
2345 void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
2346 #ifndef SHARK
2347   if (method() != NULL && !method()->is_native()) {
2348     address pc = fr.pc();
2349     SimpleScopeDesc ssd(this, pc);
2350     Bytecode_invoke call(ssd.method(), ssd.bci());
2351     bool has_receiver = call.has_receiver();
2352     bool has_appendix = call.has_appendix();
2353     Symbol* signature = call.signature();
2354 
2355     // The method attached by JIT-compilers should be used, if present.
2356     // Bytecode can be inaccurate in such case.
2357     Method* callee = attached_method_before_pc(pc);
2358     if (callee != NULL) {
2359       has_receiver = !(callee->access_flags().is_static());
2360       has_appendix = false;
2361       signature = callee->signature();
2362     }
2363 
2364     fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
2365   }
2366 #endif // !SHARK
2367 }
2368 
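// Returns true if p lies in the half-open interval [from, to).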
2369 inline bool includes(void* p, void* from, void* to) {
2370   return from <= p && p < to;
2371 }
2372 
2373 
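// Install the sorted PcDesc array produced by the compiler, padding any
// rounding gap at the end with copies of the final sentinel record.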
2374 void nmethod::copy_scopes_pcs(PcDesc* pcs, int count) {
2375   assert(count >= 2, "must be sentinel values, at least");
2376 
2377 #ifdef ASSERT
2378   // must be sorted and unique; we do a binary search in find_pc_desc()
2379   int prev_offset = pcs[0].pc_offset();
2380   assert(prev_offset == PcDesc::lower_offset_limit,
2381          "must start with a sentinel");
2382   for (int i = 1; i < count; i++) {
2383     int this_offset = pcs[i].pc_offset();
2384     assert(this_offset > prev_offset, "offsets must be sorted");
2385     prev_offset = this_offset;
2386   }
2387   assert(prev_offset == PcDesc::upper_offset_limit,
2388          "must end with a sentinel");
2389 #endif //ASSERT
2390 
2391   // Search for MethodHandle invokes and tag the nmethod.
2392   for (int i = 0; i < count; i++) {
2393     if (pcs[i].is_method_handle_invoke()) {
2394       set_has_method_handle_invokes(true);
2395       break;
2396     }
2397   }
2398   assert(has_method_handle_invokes() == (_deoptimize_mh_offset != -1), "must have deopt mh handler");
2399 
2400   int size = count * sizeof(PcDesc);
2401   assert(scopes_pcs_size() >= size, "oob");
2402   memcpy(scopes_pcs_begin(), pcs, size);
2403 
2404   // Adjust the final sentinel downward.
2405   PcDesc* last_pc = &scopes_pcs_begin()[count-1];
2406   assert(last_pc->pc_offset() == PcDesc::upper_offset_limit, "sanity");
2407   last_pc->set_pc_offset(content_size() + 1);
2408   for (; last_pc + 1 < scopes_pcs_end(); last_pc += 1) {
2409     // Fill any rounding gaps with copies of the last record.
2410     last_pc[1] = last_pc[0];
2411   }
2412   // The following assert could fail if sizeof(PcDesc) is not
2413   // an integral multiple of oopSize (the rounding term).
2414   // If it fails, change the logic to always allocate a multiple
2415   // of sizeof(PcDesc), and fill unused words with copies of *last_pc.
2416   assert(last_pc + 1 == scopes_pcs_end(), "must match exactly");
2417 }
2418 
2419 void nmethod::copy_scopes_data(u_char* buffer, int size) {
2420   assert(scopes_data_size() >= size, "oob");
2421   memcpy(scopes_data_begin(), buffer, size);
2422 }
2423 
2424 // When using JVMCI the address might be off by the size of a call instruction.
2425 bool nmethod::is_deopt_entry(address pc) {
2426   return pc == deopt_handler_begin()
2427 #if INCLUDE_JVMCI
2428     || pc == (deopt_handler_begin() + NativeCall::instruction_size)
2429 #endif
2430     ;
2431 }
2432 
2433 #ifdef ASSERT
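// Debug-only brute-force search used to cross-check the quasi-linear search
// in find_pc_desc_internal(); returns badAddress if multiple PcDescs match.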
2434 static PcDesc* linear_search(nmethod* nm, int pc_offset, bool approximate) {
2435   PcDesc* lower = nm->scopes_pcs_begin();
2436   PcDesc* upper = nm->scopes_pcs_end();
2437   lower += 1; // exclude initial sentinel
2438   PcDesc* res = NULL;
2439   for (PcDesc* p = lower; p < upper; p++) {
2440     NOT_PRODUCT(--pc_nmethod_stats.pc_desc_tests);  // don't count this call to match_desc
2441     if (match_desc(p, pc_offset, approximate)) {
2442       if (res == NULL)
2443         res = p;
2444       else
2445         res = (PcDesc*) badAddress;
2446     }
2447   }
2448   return res;
2449 }
2450 #endif
2451 
2452 
2453 // Finds a PcDesc with real-pc equal to "pc"
2454 PcDesc* nmethod::find_pc_desc_internal(address pc, bool approximate) {
2455   address base_address = code_begin();
2456   if ((pc < base_address) ||
2457       (pc - base_address) >= (ptrdiff_t) PcDesc::upper_offset_limit) {
2458     return NULL;  // PC is wildly out of range
2459   }
2460   int pc_offset = (int) (pc - base_address);
2461 
  // Check whether the PcDesc cache contains the desired PcDesc
  // (this has an almost 100% hit rate).
2464   PcDesc* res = _pc_desc_cache.find_pc_desc(pc_offset, approximate);
2465   if (res != NULL) {
2466     assert(res == linear_search(this, pc_offset, approximate), "cache ok");
2467     return res;
2468   }
2469 
2470   // Fallback algorithm: quasi-linear search for the PcDesc
2471   // Find the last pc_offset less than the given offset.
2472   // The successor must be the required match, if there is a match at all.
2473   // (Use a fixed radix to avoid expensive affine pointer arithmetic.)
2474   PcDesc* lower = scopes_pcs_begin();
2475   PcDesc* upper = scopes_pcs_end();
2476   upper -= 1; // exclude final sentinel
2477   if (lower >= upper)  return NULL;  // native method; no PcDescs at all
2478 
2479 #define assert_LU_OK \
2480   /* invariant on lower..upper during the following search: */ \
2481   assert(lower->pc_offset() <  pc_offset, "sanity"); \
2482   assert(upper->pc_offset() >= pc_offset, "sanity")
2483   assert_LU_OK;
2484 
2485   // Use the last successful return as a split point.
2486   PcDesc* mid = _pc_desc_cache.last_pc_desc();
2487   NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches);
2488   if (mid->pc_offset() < pc_offset) {
2489     lower = mid;
2490   } else {
2491     upper = mid;
2492   }
2493 
2494   // Take giant steps at first (4096, then 256, then 16, then 1)
2495   const int LOG2_RADIX = 4 /*smaller steps in debug mode:*/ debug_only(-1);
2496   const int RADIX = (1 << LOG2_RADIX);
2497   for (int step = (1 << (LOG2_RADIX*3)); step > 1; step >>= LOG2_RADIX) {
2498     while ((mid = lower + step) < upper) {
2499       assert_LU_OK;
2500       NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches);
2501       if (mid->pc_offset() < pc_offset) {
2502         lower = mid;
2503       } else {
2504         upper = mid;
2505         break;
2506       }
2507     }
2508     assert_LU_OK;
2509   }
2510 
2511   // Sneak up on the value with a linear search of length ~16.
2512   while (true) {
2513     assert_LU_OK;
2514     mid = lower + 1;
2515     NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches);
2516     if (mid->pc_offset() < pc_offset) {
2517       lower = mid;
2518     } else {
2519       upper = mid;
2520       break;
2521     }
2522   }
2523 #undef assert_LU_OK
2524 
2525   if (match_desc(upper, pc_offset, approximate)) {
2526     assert(upper == linear_search(this, pc_offset, approximate), "search ok");
2527     _pc_desc_cache.add_pc_desc(upper);
2528     return upper;
2529   } else {
2530     assert(NULL == linear_search(this, pc_offset, approximate), "search ok");
2531     return NULL;
2532   }
2533 }
2534 
2535 
2536 void nmethod::check_all_dependencies(DepChange& changes) {
2537   // Checked dependencies are allocated into this ResourceMark
2538   ResourceMark rm;
2539 
2540   // Turn off dependency tracing while actually testing dependencies.
2541   NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
2542 
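  // Hash table keyed by dependency signature, so that each distinct
  // dependency is checked at most once across all live nmethods.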
2543   typedef ResourceHashtable<DependencySignature, int, &DependencySignature::hash,
2544                             &DependencySignature::equals, 11027> DepTable;
2545 
2546   DepTable* table = new DepTable();
2547 
2548   // Iterate over live nmethods and check dependencies of all nmethods that are not
2549   // marked for deoptimization. A particular dependency is only checked once.
2550   NMethodIterator iter;
2551   while(iter.next()) {
2552     nmethod* nm = iter.method();
2553     // Only notify for live nmethods
2554     if (nm->is_alive() && !nm->is_marked_for_deoptimization()) {
2555       for (Dependencies::DepStream deps(nm); deps.next(); ) {
2556         // Construct abstraction of a dependency.
2557         DependencySignature* current_sig = new DependencySignature(deps);
2558 
2559         // Determine if dependency is already checked. table->put(...) returns
2560         // 'true' if the dependency is added (i.e., was not in the hashtable).
2561         if (table->put(*current_sig, 1)) {
2562           if (deps.check_dependency() != NULL) {
2563             // Dependency checking failed. Print out information about the failed
2564             // dependency and finally fail with an assert. We can fail here, since
2565             // dependency checking is never done in a product build.
2566             tty->print_cr("Failed dependency:");
2567             changes.print();
2568             nm->print();
2569             nm->print_dependencies();
2570             assert(false, "Should have been marked for deoptimization");
2571           }
2572         }
2573       }
2574     }
2575   }
2576 }
2577 
2578 bool nmethod::check_dependency_on(DepChange& changes) {
2579   // What has happened:
2580   // 1) a new class dependee has been added
2581   // 2) dependee and all its super classes have been marked
2582   bool found_check = false;  // set true if we are upset
2583   for (Dependencies::DepStream deps(this); deps.next(); ) {
2584     // Evaluate only relevant dependencies.
2585     if (deps.spot_check_dependency_at(changes) != NULL) {
2586       found_check = true;
2587       NOT_DEBUG(break);
2588     }
2589   }
2590   return found_check;
2591 }
2592 
2593 bool nmethod::is_evol_dependent_on(Klass* dependee) {
2594   InstanceKlass *dependee_ik = InstanceKlass::cast(dependee);
2595   Array<Method*>* dependee_methods = dependee_ik->methods();
2596   for (Dependencies::DepStream deps(this); deps.next(); ) {
2597     if (deps.type() == Dependencies::evol_method) {
2598       Method* method = deps.method_argument(0);
2599       for (int j = 0; j < dependee_methods->length(); j++) {
2600         if (dependee_methods->at(j) == method) {
2601           // RC_TRACE macro has an embedded ResourceMark
2602           RC_TRACE(0x01000000,
2603             ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on method %s.%s(%s)",
2604             _method->method_holder()->external_name(),
2605             _method->name()->as_C_string(),
2606             _method->signature()->as_C_string(), compile_id(),
2607             method->method_holder()->external_name(),
2608             method->name()->as_C_string(),
2609             method->signature()->as_C_string()));
2610           if (TraceDependencies || LogCompilation)
2611             deps.log_dependency(dependee);
2612           return true;
2613         }
2614       }
2615     }
2616   }
2617   return false;
2618 }
2619 
2620 // Called from mark_for_deoptimization, when dependee is invalidated.
2621 bool nmethod::is_dependent_on_method(Method* dependee) {
2622   for (Dependencies::DepStream deps(this); deps.next(); ) {
2623     if (deps.type() != Dependencies::evol_method)
2624       continue;
2625     Method* method = deps.method_argument(0);
2626     if (method == dependee) return true;
2627   }
2628   return false;
2629 }
2630 
2631 
2632 bool nmethod::is_patchable_at(address instr_addr) {
2633   assert(insts_contains(instr_addr), "wrong nmethod used");
2634   if (is_zombie()) {
2635     // a zombie may never be patched
2636     return false;
2637   }
2638   return true;
2639 }
2640 
2641 
2642 address nmethod::continuation_for_implicit_exception(address pc) {
2643   // Exception happened outside inline-cache check code => we are inside
2644   // an active nmethod => use cpc to determine a return address
2645   int exception_offset = pc - code_begin();
2646   int cont_offset = ImplicitExceptionTable(this).at( exception_offset );
2647 #ifdef ASSERT
2648   if (cont_offset == 0) {
2649     Thread* thread = Thread::current();
2650     ResetNoHandleMark rnm; // Might be called from LEAF/QUICK ENTRY
2651     HandleMark hm(thread);
2652     ResourceMark rm(thread);
2653     CodeBlob* cb = CodeCache::find_blob(pc);
2654     assert(cb != NULL && cb == this, "");
2655     ttyLocker ttyl;
2656     tty->print_cr("implicit exception happened at " INTPTR_FORMAT, p2i(pc));
2657     print();
2658     method()->print_codes();
2659     print_code();
2660     print_pcs();
2661   }
2662 #endif
2663   if (cont_offset == 0) {
2664     // Let the normal error handling report the exception
2665     return NULL;
2666   }
2667   return code_begin() + cont_offset;
2668 }
2669 
2670 
2671 
2672 void nmethod_init() {
2673   // make sure you didn't forget to adjust the filler fields
2674   assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word");
2675 }
2676 
2677 
2678 //-------------------------------------------------------------------------------------------
2679 
2680 
2681 // QQQ might we make this work from a frame??
2682 nmethodLocker::nmethodLocker(address pc) {
2683   CodeBlob* cb = CodeCache::find_blob(pc);
  guarantee(cb != NULL && cb->is_nmethod(), "bad pc for an nmethod found");
2685   _nm = (nmethod*)cb;
2686   lock_nmethod(_nm);
2687 }
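
// A minimal usage sketch (assuming the caller holds a PC known to be inside
// compiled code): the constructor above locks the nmethod and the matching
// destructor unlocks it, so the nmethod cannot be flushed within the scope:
//
//   { nmethodLocker nml(pc);  /* safe to examine the nmethod here */ }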
2688 
2689 // Only JvmtiDeferredEvent::compiled_method_unload_event()
2690 // should pass zombie_ok == true.
2691 void nmethodLocker::lock_nmethod(nmethod* nm, bool zombie_ok) {
2692   if (nm == NULL)  return;
2693   Atomic::inc(&nm->_lock_count);
2694   assert(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method");
2695 }
2696 
2697 void nmethodLocker::unlock_nmethod(nmethod* nm) {
2698   if (nm == NULL)  return;
2699   Atomic::dec(&nm->_lock_count);
2700   assert(nm->_lock_count >= 0, "unmatched nmethod lock/unlock");
2701 }
2702 
2703 // -----------------------------------------------------------------------------
2704 // nmethod::get_deopt_original_pc
2705 //
2706 // Return the original PC for the given PC if:
// (a) the given PC belongs to an nmethod and
2708 // (b) it is a deopt PC
2709 address nmethod::get_deopt_original_pc(const frame* fr) {
2710   if (fr->cb() == NULL)  return NULL;
2711 
2712   nmethod* nm = fr->cb()->as_nmethod_or_null();
2713   if (nm != NULL && nm->is_deopt_pc(fr->pc()))
2714     return nm->get_original_pc(fr);
2715 
2716   return NULL;
2717 }
2718 
2719 
2720 // -----------------------------------------------------------------------------
2721 // MethodHandle
2722 
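// Answers whether return_pc is the return address of a call site that
// dispatched through a MethodHandle invoke, as recorded in the PcDescs.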
2723 bool nmethod::is_method_handle_return(address return_pc) {
2724   if (!has_method_handle_invokes())  return false;
2725   PcDesc* pd = pc_desc_at(return_pc);
2726   if (pd == NULL)
2727     return false;
2728   return pd->is_method_handle_invoke();
2729 }
2730 
2731 
2732 // -----------------------------------------------------------------------------
2733 // Verification
2734 
2735 class VerifyOopsClosure: public OopClosure {
2736   nmethod* _nm;
2737   bool     _ok;
2738 public:
2739   VerifyOopsClosure(nmethod* nm) : _nm(nm), _ok(true) { }
2740   bool ok() { return _ok; }
2741   virtual void do_oop(oop* p) {
2742     if ((*p) == NULL || (*p)->is_oop())  return;
2743     if (_ok) {
2744       _nm->print_nmethod(true);
2745       _ok = false;
2746     }
2747     tty->print_cr("*** non-oop " PTR_FORMAT " found at " PTR_FORMAT " (offset %d)",
2748                   p2i(*p), p2i(p), (int)((intptr_t)p - (intptr_t)_nm));
2749   }
2750   virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2751 };
2752 
2753 void nmethod::verify() {
2754 
  // Hmm: that OSR methods can be deopted but not marked as zombie or
  // not_entrant seems odd.
2757 
2758   if (is_zombie() || is_not_entrant() || is_unloaded())
2759     return;
2760 
2761   // Make sure all the entry points are correctly aligned for patching.
2762   NativeJump::check_verified_entry_alignment(entry_point(), verified_entry_point());
2763 
2764   // assert(method()->is_oop(), "must be valid");
2765 
2766   ResourceMark rm;
2767 
2768   if (!CodeCache::contains(this)) {
2769     fatal("nmethod at " INTPTR_FORMAT " not in zone", p2i(this));
2770   }
2771 
  if (is_native_method())
2773     return;
2774 
2775   nmethod* nm = CodeCache::find_nmethod(verified_entry_point());
2776   if (nm != this) {
2777     fatal("findNMethod did not find this nmethod (" INTPTR_FORMAT ")", p2i(this));
2778   }
2779 
2780   for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
    if (!p->verify(this)) {
2782       tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)", p2i(this));
2783     }
2784   }
2785 
2786   VerifyOopsClosure voc(this);
2787   oops_do(&voc);
2788   assert(voc.ok(), "embedded oops must be OK");
2789   verify_scavenge_root_oops();
2790 
2791   verify_scopes();
2792 }
2793 
2794 
2795 void nmethod::verify_interrupt_point(address call_site) {
2796   // Verify IC only when nmethod installation is finished.
2797   bool is_installed = (method()->code() == this) // nmethod is in state 'in_use' and installed
2798                       || !this->is_in_use();     // nmethod is installed, but not in 'in_use' state
2799   if (is_installed) {
2800     Thread *cur = Thread::current();
2801     if (CompiledIC_lock->owner() == cur ||
2802         ((cur->is_VM_thread() || cur->is_ConcurrentGC_thread()) &&
2803          SafepointSynchronize::is_at_safepoint())) {
2804       CompiledIC_at(this, call_site);
2805       CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
2806     } else {
2807       MutexLocker ml_verify (CompiledIC_lock);
2808       CompiledIC_at(this, call_site);
2809     }
2810   }
2811 
2812   PcDesc* pd = pc_desc_at(nativeCall_at(call_site)->return_address());
2813   assert(pd != NULL, "PcDesc must exist");
2814   for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(),
2815                                      pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
2816                                      pd->return_oop());
2817        !sd->is_top(); sd = sd->sender()) {
2818     sd->verify();
2819   }
2820 }
2821 
2822 void nmethod::verify_scopes() {
  if (!method()) return;        // Runtime stubs have no scope
2824   if (method()->is_native()) return; // Ignore stub methods.
  // Iterate through all interrupt points
  // and verify that the debug information is valid.
2827   RelocIterator iter((nmethod*)this);
2828   while (iter.next()) {
2829     address stub = NULL;
2830     switch (iter.type()) {
2831       case relocInfo::virtual_call_type:
2832         verify_interrupt_point(iter.addr());
2833         break;
2834       case relocInfo::opt_virtual_call_type:
2835         stub = iter.opt_virtual_call_reloc()->static_stub();
2836         verify_interrupt_point(iter.addr());
2837         break;
2838       case relocInfo::static_call_type:
2839         stub = iter.static_call_reloc()->static_stub();
2840         //verify_interrupt_point(iter.addr());
2841         break;
2842       case relocInfo::runtime_call_type:
2843         address destination = iter.reloc()->value();
2844         // Right now there is no way to find out which entries support
2845         // an interrupt point.  It would be nice if we had this
2846         // information in a table.
2847         break;
2848     }
2849     assert(stub == NULL || stub_contains(stub), "static call stub outside stub section");
2850   }
2851 }
2852 
2853 
2854 // -----------------------------------------------------------------------------
2855 // Non-product code
2856 #ifndef PRODUCT
2857 
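// Closure used by verify_scavenge_root_oops() in non-product builds to report
// any scavengable oop found embedded in the nmethod: the nmethod is printed
// once, then each offending oop is dumped with its offset.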
class DebugScavengeRoot: public OopClosure {
  nmethod* _nm;
  bool     _ok;
public:
  DebugScavengeRoot(nmethod* nm) : _nm(nm), _ok(true) { }
  bool ok() { return _ok; }
  virtual void do_oop(oop* p) {
    if ((*p) == NULL || !(*p)->is_scavengable())  return;
    if (_ok) {
      _nm->print_nmethod(true);
      _ok = false;
    }
    tty->print_cr("*** scavengable oop " PTR_FORMAT " found at " PTR_FORMAT " (offset %d)",
                  p2i(*p), p2i(p), (int)((intptr_t)p - (intptr_t)_nm));
    (*p)->print();
  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};

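// Verify the claim that an nmethod which is not on the scavenge-root list
// really contains no scavengable oops. Under G1 there is nothing to check:
// G1 tracks nmethods with oops into the heap through per-region code-root
// sets rather than the scavenge-root list.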
void nmethod::verify_scavenge_root_oops() {
  if (UseG1GC) {
    return;
  }

  if (!on_scavenge_root_list()) {
    // Actually look inside, to verify the claim that it's clean.
    DebugScavengeRoot debug_scavenge_root(this);
    oops_do(&debug_scavenge_root);
    if (!debug_scavenge_root.ok())
      fatal("found an unadvertised bad scavengable oop in the code cache");
  }
  assert(scavenge_root_not_marked(), "");
}

#endif // PRODUCT

// Printing operations

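// Print a one-block summary of this nmethod: the compiler that produced it,
// its state flags (in WizardMode), and the address range and size of every
// section that is present.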
void nmethod::print() const {
  ResourceMark rm;
  ttyLocker ttyl;   // keep the following output all in one block

  tty->print("Compiled method ");

  if (is_compiled_by_c1()) {
    tty->print("(c1) ");
  } else if (is_compiled_by_c2()) {
    tty->print("(c2) ");
  } else if (is_compiled_by_shark()) {
    tty->print("(shark) ");
  } else if (is_compiled_by_jvmci()) {
    tty->print("(JVMCI) ");
  } else {
    tty->print("(nm) ");
  }

  print_on(tty, NULL);

  if (WizardMode) {
    tty->print("((nmethod*) " INTPTR_FORMAT ") ", p2i(this));
    tty->print(" for method " INTPTR_FORMAT , p2i(method()));
    tty->print(" { ");
    if (is_in_use())      tty->print("in_use ");
    if (is_not_entrant()) tty->print("not_entrant ");
    if (is_zombie())      tty->print("zombie ");
    if (is_unloaded())    tty->print("unloaded ");
    if (on_scavenge_root_list())  tty->print("scavenge_root ");
    tty->print_cr("}:");
  }
  if (size              () > 0) tty->print_cr(" total in heap  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                              p2i(this),
                                              p2i(this) + size(),
                                              size());
  if (relocation_size   () > 0) tty->print_cr(" relocation     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                              p2i(relocation_begin()),
                                              p2i(relocation_end()),
                                              relocation_size());
  if (consts_size       () > 0) tty->print_cr(" constants      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                              p2i(consts_begin()),
                                              p2i(consts_end()),
                                              consts_size());
  if (insts_size        () > 0) tty->print_cr(" main code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                              p2i(insts_begin()),
                                              p2i(insts_end()),
                                              insts_size());
  if (stub_size         () > 0) tty->print_cr(" stub code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                              p2i(stub_begin()),
                                              p2i(stub_end()),
                                              stub_size());
  if (oops_size         () > 0) tty->print_cr(" oops           [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                              p2i(oops_begin()),
                                              p2i(oops_end()),
                                              oops_size());
  if (metadata_size     () > 0) tty->print_cr(" metadata       [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                              p2i(metadata_begin()),
                                              p2i(metadata_end()),
                                              metadata_size());
  if (scopes_data_size  () > 0) tty->print_cr(" scopes data    [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                              p2i(scopes_data_begin()),
                                              p2i(scopes_data_end()),
                                              scopes_data_size());
  if (scopes_pcs_size   () > 0) tty->print_cr(" scopes pcs     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                              p2i(scopes_pcs_begin()),
                                              p2i(scopes_pcs_end()),
                                              scopes_pcs_size());
  if (dependencies_size () > 0) tty->print_cr(" dependencies   [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                              p2i(dependencies_begin()),
                                              p2i(dependencies_end()),
                                              dependencies_size());
  if (handler_table_size() > 0) tty->print_cr(" handler table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                              p2i(handler_table_begin()),
                                              p2i(handler_table_end()),
                                              handler_table_size());
  if (nul_chk_table_size() > 0) tty->print_cr(" nul chk table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                              p2i(nul_chk_table_begin()),
                                              p2i(nul_chk_table_end()),
                                              nul_chk_table_size());
}

#ifndef PRODUCT

void nmethod::print_scopes() {
  // For each PcDesc that carries debug information, print its full scope chain.
  ResourceMark rm;
  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
    if (p->scope_decode_offset() == DebugInformationRecorder::serialized_null)
      continue;

    ScopeDesc* sd = scope_desc_at(p->real_pc(this));
    while (sd != NULL) {
      sd->print_on(tty, p);
      sd = sd->sender();
    }
  }
}

void nmethod::print_dependencies() {
  ResourceMark rm;
  ttyLocker ttyl;   // keep the following output all in one block
  tty->print_cr("Dependencies:");
  for (Dependencies::DepStream deps(this); deps.next(); ) {
    deps.print_dependency();
    Klass* ctxk = deps.context_type();
    if (ctxk != NULL) {
      if (ctxk->is_instance_klass() && InstanceKlass::cast(ctxk)->is_dependent_nmethod(this)) {
        tty->print_cr("   [nmethod<=klass]%s", ctxk->external_name());
      }
    }
    deps.log_dependency();  // put it into the xml log also
  }
}


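// Dump every relocation entry. If UseRelocIndex is set, also decode the
// lookup index stored at the end of the relocation section: pairs of
// (code offset, relocation offset), followed by the index size word.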
void nmethod::print_relocations() {
  ResourceMark m;       // in case methods get printed via the debugger
  tty->print_cr("relocations:");
  RelocIterator iter(this);
  iter.print();
  if (UseRelocIndex) {
    jint* index_end   = (jint*)relocation_end() - 1;
    jint  index_size  = *index_end;
    jint* index_start = (jint*)( (address)index_end - index_size );
    tty->print_cr("    index @" INTPTR_FORMAT ": index_size=%d", p2i(index_start), index_size);
    if (index_size > 0) {
      jint* ip;
      for (ip = index_start; ip+2 <= index_end; ip += 2)
        tty->print_cr("  (%d %d) addr=" INTPTR_FORMAT " @" INTPTR_FORMAT,
                      ip[0],
                      ip[1],
                      p2i(header_end()+ip[0]),
                      p2i(relocation_begin()-1+ip[1]));
      for (; ip < index_end; ip++)
        tty->print_cr("  (%d ?)", ip[0]);
      tty->print_cr("          @" INTPTR_FORMAT ": index_size=%d", p2i(ip), *ip);
      ip++;
      tty->print_cr("reloc_end @" INTPTR_FORMAT ":", p2i(ip));
    }
  }
}


void nmethod::print_pcs() {
  ResourceMark m;       // in case methods get printed via debugger
  tty->print_cr("pc-bytecode offsets:");
  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
    p->print(this);
  }
}

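// List each recorded oop by index. Entries equal to Universe::non_oop_word()
// are placeholders patched in by the compiler and must not be dereferenced,
// so they are flagged rather than printed as values. A line looks roughly
// like "#  3: 0x00000007c0060a68 <value>" (format per the print calls below).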
void nmethod::print_recorded_oops() {
  tty->print_cr("Recorded oops:");
  for (int i = 0; i < oops_count(); i++) {
    oop o = oop_at(i);
    tty->print("#%3d: " INTPTR_FORMAT " ", i, p2i(o));
    if (o == (oop)Universe::non_oop_word()) {
      tty->print("non-oop word");
    } else {
      o->print_value();
    }
    tty->cr();
  }
}

void nmethod::print_recorded_metadata() {
  tty->print_cr("Recorded metadata:");
  for (int i = 0; i < metadata_count(); i++) {
    Metadata* m = metadata_at(i);
    tty->print("#%3d: " INTPTR_FORMAT " ", i, p2i(m));
    if (m == (Metadata*)Universe::non_oop_word()) {
      tty->print("non-metadata word");
    } else {
      m->print_value_on_maybe_null(tty);
    }
    tty->cr();
  }
}

#endif // PRODUCT

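// Describe the first printable relocation found in [begin, end): the
// relocation type plus, where available, the referenced oop, metadata,
// callee method or runtime entry. Returns "other" if the range contains
// only relocations with no printable detail, and NULL if it contains none.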
const char* nmethod::reloc_string_for(u_char* begin, u_char* end) {
  RelocIterator iter(this, begin, end);
  bool have_one = false;
  while (iter.next()) {
    have_one = true;
    switch (iter.type()) {
        case relocInfo::none:                  return "no_reloc";
        case relocInfo::oop_type: {
          stringStream st;
          oop_Relocation* r = iter.oop_reloc();
          oop obj = r->oop_value();
          st.print("oop(");
          if (obj == NULL) st.print("NULL");
          else obj->print_value_on(&st);
          st.print(")");
          return st.as_string();
        }
        case relocInfo::metadata_type: {
          stringStream st;
          metadata_Relocation* r = iter.metadata_reloc();
          Metadata* obj = r->metadata_value();
          st.print("metadata(");
          if (obj == NULL) st.print("NULL");
          else obj->print_value_on(&st);
          st.print(")");
          return st.as_string();
        }
        case relocInfo::runtime_call_type: {
          stringStream st;
          st.print("runtime_call");
          runtime_call_Relocation* r = iter.runtime_call_reloc();
          address dest = r->destination();
          CodeBlob* cb = CodeCache::find_blob(dest);
          if (cb != NULL) {
            st.print(" %s", cb->name());
          } else {
            ResourceMark rm;
            const int buflen = 1024;
            char* buf = NEW_RESOURCE_ARRAY(char, buflen);
            int offset;
            if (os::dll_address_to_function_name(dest, buf, buflen, &offset)) {
              st.print(" %s", buf);
              if (offset != 0) {
                st.print("+%d", offset);
              }
            }
          }
          return st.as_string();
        }
        case relocInfo::virtual_call_type: {
          stringStream st;
          st.print_raw("virtual_call");
          virtual_call_Relocation* r = iter.virtual_call_reloc();
          Method* m = r->method_value();
          if (m != NULL) {
            assert(m->is_method(), "");
            m->print_short_name(&st);
          }
          return st.as_string();
        }
        case relocInfo::opt_virtual_call_type: {
          stringStream st;
          st.print_raw("optimized virtual_call");
          opt_virtual_call_Relocation* r = iter.opt_virtual_call_reloc();
          Method* m = r->method_value();
          if (m != NULL) {
            assert(m->is_method(), "");
            m->print_short_name(&st);
          }
          return st.as_string();
        }
        case relocInfo::static_call_type: {
          stringStream st;
          st.print_raw("static_call");
          static_call_Relocation* r = iter.static_call_reloc();
          Method* m = r->method_value();
          if (m != NULL) {
            assert(m->is_method(), "");
            m->print_short_name(&st);
          }
          return st.as_string();
        }
        case relocInfo::static_stub_type:      return "static_stub";
        case relocInfo::external_word_type:    return "external_word";
        case relocInfo::internal_word_type:    return "internal_word";
        case relocInfo::section_word_type:     return "section_word";
        case relocInfo::poll_type:             return "poll";
        case relocInfo::poll_return_type:      return "poll_return";
        case relocInfo::type_mask:             return "type_bit_mask";
    }
  }
  return have_one ? "other" : NULL;
}

// Return the last scope in (begin..end]
ScopeDesc* nmethod::scope_desc_in(address begin, address end) {
  PcDesc* p = pc_desc_near(begin+1);
  if (p != NULL && p->real_pc(this) <= end) {
    return new ScopeDesc(this, p->scope_decode_offset(),
                         p->obj_decode_offset(), p->should_reexecute(), p->rethrow_exception(),
                         p->return_oop());
  }
  return NULL;
}

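// Emit section labels ([Entry Point], [Verified Entry Point], [Exception
// Handler], ...) for block boundaries during disassembly. At the entry point
// of a normal (non-OSR) method this also prints where each incoming argument
// lives, mapping 'this' and each parameter to the register or caller stack
// slot assigned by the Java calling convention.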
void nmethod::print_nmethod_labels(outputStream* stream, address block_begin) const {
  if (block_begin == entry_point())             stream->print_cr("[Entry Point]");
  if (block_begin == verified_entry_point())    stream->print_cr("[Verified Entry Point]");
  if (JVMCI_ONLY(_exception_offset >= 0 &&) block_begin == exception_begin())         stream->print_cr("[Exception Handler]");
  if (block_begin == stub_begin())              stream->print_cr("[Stub Code]");
  if (JVMCI_ONLY(_deoptimize_offset >= 0 &&) block_begin == deopt_handler_begin())     stream->print_cr("[Deopt Handler Code]");

  if (has_method_handle_invokes())
    if (block_begin == deopt_mh_handler_begin())  stream->print_cr("[Deopt MH Handler Code]");

  if (block_begin == consts_begin())            stream->print_cr("[Constants]");

  if (block_begin == entry_point()) {
    methodHandle m = method();
    if (m.not_null()) {
      stream->print("  # ");
      m->print_value_on(stream);
      stream->cr();
    }
    if (m.not_null() && !is_osr_method()) {
      ResourceMark rm;
      int sizeargs = m->size_of_parameters();
      BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
      VMRegPair* regs   = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
      {
        int sig_index = 0;
        if (!m->is_static())
          sig_bt[sig_index++] = T_OBJECT; // 'this'
        for (SignatureStream ss(m->signature()); !ss.at_return_type(); ss.next()) {
          BasicType t = ss.type();
          sig_bt[sig_index++] = t;
          if (type2size[t] == 2) {
            sig_bt[sig_index++] = T_VOID;
          } else {
            assert(type2size[t] == 1, "size is 1 or 2");
          }
        }
        assert(sig_index == sizeargs, "");
      }
      const char* spname = "sp"; // make arch-specific?
      intptr_t out_preserve = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs, false);
      int stack_slot_offset = this->frame_size() * wordSize;
      int tab1 = 14, tab2 = 24;
      int sig_index = 0;
      int arg_index = (m->is_static() ? 0 : -1);
      bool did_old_sp = false;
      for (SignatureStream ss(m->signature()); !ss.at_return_type(); ) {
        bool at_this = (arg_index == -1);
        bool at_old_sp = false;
        BasicType t = (at_this ? T_OBJECT : ss.type());
        assert(t == sig_bt[sig_index], "sigs in sync");
        if (at_this)
          stream->print("  # this: ");
        else
          stream->print("  # parm%d: ", arg_index);
        stream->move_to(tab1);
        VMReg fst = regs[sig_index].first();
        VMReg snd = regs[sig_index].second();
        if (fst->is_reg()) {
          stream->print("%s", fst->name());
          if (snd->is_valid())  {
            stream->print(":%s", snd->name());
          }
        } else if (fst->is_stack()) {
          int offset = fst->reg2stack() * VMRegImpl::stack_slot_size + stack_slot_offset;
          if (offset == stack_slot_offset)  at_old_sp = true;
          stream->print("[%s+0x%x]", spname, offset);
        } else {
          stream->print("reg%d:%d??", (int)(intptr_t)fst, (int)(intptr_t)snd);
        }
        stream->print(" ");
        stream->move_to(tab2);
        stream->print("= ");
        if (at_this) {
          m->method_holder()->print_value_on(stream);
        } else {
          bool did_name = false;
          if (!at_this && ss.is_object()) {
            Symbol* name = ss.as_symbol_or_null();
            if (name != NULL) {
              name->print_value_on(stream);
              did_name = true;
            }
          }
          if (!did_name)
            stream->print("%s", type2name(t));
        }
        if (at_old_sp) {
          stream->print("  (%s of caller)", spname);
          did_old_sp = true;
        }
        stream->cr();
        sig_index += type2size[t];
        arg_index += 1;
        if (!at_this)  ss.next();
      }
      if (!did_old_sp) {
        stream->print("  # ");
        stream->move_to(tab1);
        stream->print("[%s+0x%x]", spname, stack_slot_offset);
        stream->print("  (%s of caller)", spname);
        stream->cr();
      }
    }
  }
}

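// Annotate the code range (begin, end] at the given column with everything
// the debug information associates with it: the oop map, the scope chain
// (with bytecode and source line), relocation details, and any implicit
// exception continuation.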
void nmethod::print_code_comment_on(outputStream* st, int column, u_char* begin, u_char* end) {
  // First, find an oopmap in (begin, end].
  // We use the odd half-closed interval so that oop maps and scope descs
  // which are tied to the byte after a call are printed with the call itself.
  address base = code_begin();
  ImmutableOopMapSet* oms = oop_maps();
  if (oms != NULL) {
    for (int i = 0, imax = oms->count(); i < imax; i++) {
      const ImmutableOopMapPair* pair = oms->pair_at(i);
      const ImmutableOopMap* om = pair->get_from(oms);
      address pc = base + pair->pc_offset();
      if (pc > begin) {
        if (pc <= end) {
          st->move_to(column);
          st->print("; ");
          om->print_on(st);
        }
        break;
      }
    }
  }

  // Print any debug info present at this pc.
  ScopeDesc* sd  = scope_desc_in(begin, end);
  if (sd != NULL) {
    st->move_to(column);
    if (sd->bci() == SynchronizationEntryBCI) {
      st->print(";*synchronization entry");
    } else {
      if (sd->method() == NULL) {
        st->print("method is NULL");
      } else if (sd->method()->is_native()) {
        st->print("method is native");
      } else {
        Bytecodes::Code bc = sd->method()->java_code_at(sd->bci());
        st->print(";*%s", Bytecodes::name(bc));
        switch (bc) {
        case Bytecodes::_invokevirtual:
        case Bytecodes::_invokespecial:
        case Bytecodes::_invokestatic:
        case Bytecodes::_invokeinterface:
          {
            Bytecode_invoke invoke(sd->method(), sd->bci());
            st->print(" ");
            if (invoke.name() != NULL)
              invoke.name()->print_symbol_on(st);
            else
              st->print("<UNKNOWN>");
            break;
          }
        case Bytecodes::_getfield:
        case Bytecodes::_putfield:
        case Bytecodes::_getstatic:
        case Bytecodes::_putstatic:
          {
            Bytecode_field field(sd->method(), sd->bci());
            st->print(" ");
            if (field.name() != NULL)
              field.name()->print_symbol_on(st);
            else
              st->print("<UNKNOWN>");
            break;
          }
        }
      }
      st->print(" {reexecute=%d rethrow=%d return_oop=%d}", sd->should_reexecute(), sd->rethrow_exception(), sd->return_oop());
    }

    // Print all scopes
    for (;sd != NULL; sd = sd->sender()) {
      st->move_to(column);
      st->print("; -");
      if (sd->method() == NULL) {
        st->print("method is NULL");
      } else {
        sd->method()->print_short_name(st);
      }
      // Guard against a NULL method when computing the line number.
      int lineno = (sd->method() == NULL) ? -1 : sd->method()->line_number_from_bci(sd->bci());
      if (lineno != -1) {
        st->print("@%d (line %d)", sd->bci(), lineno);
      } else {
        st->print("@%d", sd->bci());
      }
      st->cr();
    }
  }

  // Print relocation information
  const char* str = reloc_string_for(begin, end);
  if (str != NULL) {
    if (sd != NULL) st->cr();
    st->move_to(column);
    st->print(";   {%s}", str);
  }
  int cont_offset = ImplicitExceptionTable(this).at(begin - code_begin());
  if (cont_offset != 0) {
    st->move_to(column);
    st->print("; implicit exception: dispatches to " INTPTR_FORMAT, p2i(code_begin() + cont_offset));
  }
}

#ifndef PRODUCT

void nmethod::print_value_on(outputStream* st) const {
  st->print("nmethod");
  print_on(st, NULL);
}

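// Print every call site recorded in the relocations: inline caches (virtual
// and optimized virtual calls) are printed under CompiledIC_lock so their
// state can be read consistently; static call sites are printed with their
// address and resolved state.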
void nmethod::print_calls(outputStream* st) {
  RelocIterator iter(this);
  while (iter.next()) {
    switch (iter.type()) {
    case relocInfo::virtual_call_type:
    case relocInfo::opt_virtual_call_type: {
      VerifyMutexLocker mc(CompiledIC_lock);
      CompiledIC_at(&iter)->print();
      break;
    }
    case relocInfo::static_call_type:
      st->print_cr("Static call at " INTPTR_FORMAT, p2i(iter.reloc()->addr()));
      compiledStaticCall_at(iter.reloc())->print();
      break;
    }
  }
}

void nmethod::print_handler_table() {
  ExceptionHandlerTable(this).print();
}

void nmethod::print_nul_chk_table() {
  ImplicitExceptionTable(this).print(code_begin());
}

void nmethod::print_statistics() {
  ttyLocker ttyl;
  if (xtty != NULL)  xtty->head("statistics type='nmethod'");
  native_nmethod_stats.print_native_nmethod_stats();
#ifdef COMPILER1
  c1_java_nmethod_stats.print_nmethod_stats("C1");
#endif
#ifdef COMPILER2
  c2_java_nmethod_stats.print_nmethod_stats("C2");
#endif
#if INCLUDE_JVMCI
  jvmci_java_nmethod_stats.print_nmethod_stats("JVMCI");
#endif
#ifdef SHARK
  shark_java_nmethod_stats.print_nmethod_stats("Shark");
#endif
  unknown_java_nmethod_stats.print_nmethod_stats("Unknown");
  DebugInformationRecorder::print_statistics();
#ifndef PRODUCT
  pc_nmethod_stats.print_pc_stats();
#endif
  Dependencies::print_statistics();
  if (xtty != NULL)  xtty->tail("statistics");
}

#endif // !PRODUCT

#if INCLUDE_JVMCI
void nmethod::clear_jvmci_installed_code() {
  // write_ref_nmethod_pre/post can only be safely called at a
  // safepoint or while holding the CodeCache_lock.
  assert(CodeCache_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "should be performed under a lock for consistency");
  if (_jvmci_installed_code != NULL) {
    // This must be done carefully to maintain nmethod remembered sets properly
    BarrierSet* bs = Universe::heap()->barrier_set();
    bs->write_ref_nmethod_pre(&_jvmci_installed_code, this);
    _jvmci_installed_code = NULL;
    bs->write_ref_nmethod_post(&_jvmci_installed_code, this);
  }
}

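// Keep the InstalledCode mirror consistent with this nmethod's state: once
// the nmethod is dead, both the address and the entry point are cleared so
// it can be flushed; if it is merely not_entrant, only the entry point is
// cleared so existing activations can still be invalidated through the link.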
void nmethod::maybe_invalidate_installed_code() {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "should be performed under a lock for consistency");
  oop installed_code = jvmci_installed_code();
  if (installed_code != NULL) {
    nmethod* nm = (nmethod*)InstalledCode::address(installed_code);
    if (nm == NULL || nm != this) {
      // The link has been broken or the InstalledCode instance is
      // associated with another nmethod so do nothing.
      return;
    }
    if (!is_alive()) {
      // Break the link between nmethod and InstalledCode such that the nmethod
      // can subsequently be flushed safely.  The link must be maintained while
      // the method could have live activations since invalidateInstalledCode
      // might want to invalidate all existing activations.
      InstalledCode::set_address(installed_code, 0);
      InstalledCode::set_entryPoint(installed_code, 0);
    } else if (is_not_entrant()) {
      // Remove the entry point so any invocation will fail but keep
      // the address link around so that existing activations can
      // be invalidated.
      InstalledCode::set_entryPoint(installed_code, 0);
    }
  }
}

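// Invalidate the nmethod referenced by the given InstalledCode: deoptimize
// it if it is still alive, then break the InstalledCode -> nmethod link.
// Throws NullPointerException if installedCode is NULL.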
void nmethod::invalidate_installed_code(Handle installedCode, TRAPS) {
  if (installedCode() == NULL) {
    THROW(vmSymbols::java_lang_NullPointerException());
  }
  jlong nativeMethod = InstalledCode::address(installedCode);
  nmethod* nm = (nmethod*)nativeMethod;
  if (nm == NULL) {
    // Nothing to do
    return;
  }

  nmethodLocker nml(nm);
#ifdef ASSERT
  {
    MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
    // This relationship can only be checked safely under a lock
    assert(nm == NULL || !nm->is_alive() || nm->jvmci_installed_code() == installedCode(), "sanity check");
  }
#endif

  if (nm->is_alive()) {
    // The nmethod state machinery maintains the link between the
    // HotSpotInstalledCode and nmethod* so as long as the nmethod appears to be
    // alive assume there is work to do and deoptimize the nmethod.
    nm->mark_for_deoptimization();
    VM_Deoptimize op;
    VMThread::execute(&op);
  }

  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
  // Check that it's still associated with the same nmethod and break
  // the link if it is.
  if (InstalledCode::address(installedCode) == nativeMethod) {
    InstalledCode::set_address(installedCode, 0);
  }
}

char* nmethod::jvmci_installed_code_name(char* buf, size_t buflen) {
  if (!this->is_compiled_by_jvmci()) {
    return NULL;
  }
  oop installedCode = this->jvmci_installed_code();
  if (installedCode != NULL) {
    oop installedCodeName = NULL;
    if (installedCode->is_a(InstalledCode::klass())) {
      installedCodeName = InstalledCode::name(installedCode);
    }
    if (installedCodeName != NULL) {
      return java_lang_String::as_utf8_string(installedCodeName, buf, (int)buflen);
    } else {
      jio_snprintf(buf, buflen, "null");
      return buf;
    }
  }
  jio_snprintf(buf, buflen, "noInstalledCode");
  return buf;
}
#endif // INCLUDE_JVMCI

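// Return the Method* recorded on the call-site relocation at call_instr,
// or NULL if the relocation carries no attached method. Attached methods
// let the runtime resolve a call's callee when it cannot be derived from
// the bytecode alone.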
Method* nmethod::attached_method(address call_instr) {
  assert(code_contains(call_instr), "not part of the nmethod");
  RelocIterator iter(this, call_instr, call_instr + 1);
  while (iter.next()) {
    if (iter.addr() == call_instr) {
      switch (iter.type()) {
        case relocInfo::static_call_type:      return iter.static_call_reloc()->method_value();
        case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
        case relocInfo::virtual_call_type:     return iter.virtual_call_reloc()->method_value();
      }
    }
  }
  return NULL; // not found
}

Method* nmethod::attached_method_before_pc(address pc) {
  if (NativeCall::is_call_before(pc)) {
    NativeCall* ncall = nativeCall_before(pc);
    return attached_method(ncall->instruction_address());
  }
  return NULL; // not a call
}