/*
 * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/scopeDesc.hpp"
#include "interpreter/bytecode.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/sharedRuntime.hpp"

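// The two constructors mirror the CodeBlob constructors: one takes a
// precomputed CodeBlobLayout, the other derives the layout from a CodeBuffer.
// Both delegate the common field setup to init_defaults().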
CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
  _method(method), _mark_for_deoptimization_status(not_marked) {
  init_defaults();
}

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
  _method(method), _mark_for_deoptimization_status(not_marked) {
  init_defaults();
}

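// Reset the per-method flag bits and the unloading clock to their cleared state.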
void CompiledMethod::init_defaults() {
  _has_unsafe_access          = 0;
  _has_method_handle_invokes  = 0;
  _lazy_critical_native       = 0;
  _has_wide_vectors           = 0;
  _unloading_clock            = 0;
}

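// Returns true if return_pc is the return site of a method handle invoke
// in this method, as recorded in the PcDesc for that call.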
bool CompiledMethod::is_method_handle_return(address return_pc) {
  if (!has_method_handle_invokes())  return false;
  PcDesc* pd = pc_desc_at(return_pc);
  if (pd == NULL)
    return false;
  return pd->is_method_handle_invoke();
}

// When using JVMCI the address might be off by the size of a call instruction.
bool CompiledMethod::is_deopt_entry(address pc) {
  return pc == deopt_handler_begin()
#if INCLUDE_JVMCI
    || (is_compiled_by_jvmci() && pc == (deopt_handler_begin() + NativeCall::instruction_size))
#endif
    ;
}

// Returns a string version of the method state.
const char* CompiledMethod::state() const {
  int state = get_state();
  switch (state) {
  case in_use:
    return "in use";
  case not_used:
    return "not_used";
  case not_entrant:
    return "not_entrant";
  case zombie:
    return "zombie";
  case unloaded:
    return "unloaded";
  default:
    fatal("unexpected method state: %d", state);
    return NULL;
  }
}

//-----------------------------------------------------------------------------

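// Inserts a new entry at the head of the exception cache list; requires the
// ExceptionCache_lock, since readers walk the list lock-free.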
void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(), "Must hold the ExceptionCache_lock");
  assert(new_entry != NULL, "Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

  ExceptionCache *ec = exception_cache();
  if (ec != NULL) {
    new_entry->set_next(ec);
  }
  release_set_exception_cache(new_entry);
}

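// Unlinks and deletes entries whose exception Klass is no longer
// loader-alive, keeping the rest of the list intact.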
void CompiledMethod::clean_exception_cache(BoolObjectClosure* is_alive) {
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    Klass* ex_klass = curr->exception_type();
    if (ex_klass != NULL && !ex_klass->is_loader_alive(is_alive)) {
      if (prev == NULL) {
        set_exception_cache(next);
      } else {
        prev->set_next(next);
      }
      delete curr;
      // prev stays the same.
    } else {
      prev = curr;
    }

    curr = next;
  }
}

// Public access methods for the exception cache.
address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception, pc)) != NULL) {
      return ret_val;
    }
    ec = ec->next();
  }
  return NULL;
}

void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == NULL || !target_entry->add_address_and_handler(pc, handler)) {
    target_entry = new ExceptionCache(exception, pc, handler);
    add_exception_cache_entry(target_entry);
  }
}

//-------------end of code for ExceptionCache--------------

// Private method used to manipulate the exception cache directly.
ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}

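// Returns true if there is a poll-return relocation at pc.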
bool CompiledMethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}


bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}

void CompiledMethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
  RelocIterator iter(this, NULL, NULL);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}


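// Reconstructs the ScopeDesc (debug info) for the PcDesc at pc;
// the PcDesc must be present.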
ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop(), pd->return_vt());
}

void CompiledMethod::cleanup_inline_caches(bool clean_all/*=false*/) {
  assert_locked_or_safepoint(CompiledIC_lock);

  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (!is_in_use() && is_nmethod()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }

  // Find all calls in an nmethod and clear the ones that point to non-entrant,
  // zombie and unloaded nmethods.
  ResourceMark rm;
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    switch (iter.type()) {
      case relocInfo::virtual_call_type:
      case relocInfo::opt_virtual_call_type: {
        CompiledIC *ic = CompiledIC_at(&iter);
        // It is OK to look up references to zombies here.
        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
        if (cb != NULL && cb->is_compiled()) {
          CompiledMethod* nm = cb->as_compiled_method();
          // Clean inline caches pointing to zombie, non-entrant and unloaded methods
          if (clean_all || !nm->is_in_use() || (nm->method()->code() != nm)) {
            ic->set_to_clean(is_alive());
          }
        }
        break;
      }
      case relocInfo::static_call_type: {
        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
        if (cb != NULL && cb->is_compiled()) {
          CompiledMethod* cm = cb->as_compiled_method();
          // Clean inline caches pointing to zombie, non-entrant and unloaded methods
          if (clean_all || !cm->is_in_use() || (cm->method()->code() != cm)) {
            csc->set_to_clean();
          }
        }
        break;
      }
      default:
        break;
    }
  }
}

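// Counts the ICHolder-based virtual call sites in this method, asserting
// that each cached ICHolder is non-NULL; used for verification.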
int CompiledMethod::verify_icholder_relocations() {
  ResourceMark rm;
  int count = 0;

  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc(), this)) {
        CompiledIC *ic = CompiledIC_at(&iter);
        if (TraceCompiledIC) {
          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
          ic->print();
        }
        assert(ic->cached_icholder() != NULL, "must be non-NULL");
        count++;
      }
    }
  }

  return count;
}

// Preserve outgoing argument oops at a call site. This method must be
// called with a frame corresponding to a Java invoke.
void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
#ifndef SHARK
  if (method() != NULL && !method()->is_native()) {
    address pc = fr.pc();
    SimpleScopeDesc ssd(this, pc);
    Bytecode_invoke call(ssd.method(), ssd.bci());
    bool has_receiver = call.has_receiver();
    bool has_appendix = call.has_appendix();
    Symbol* signature = call.signature();

    // The method attached by JIT-compilers should be used, if present.
    // The bytecode can be inaccurate in that case.
    Method* callee = attached_method_before_pc(pc);
    if (callee != NULL) {
      has_receiver = !(callee->access_flags().is_static());
      has_appendix = false;
      signature = callee->signature();
    }

    // If value types are passed as fields, use the extended signature
    // which contains the types of all (oop) fields of the value type.
    if (ValueTypePassFieldsAsArgs && callee != NULL) {
      // Check if the receiver or one of the arguments is a value type
      bool has_value_receiver = has_receiver && callee->method_holder()->is_value();
      bool has_value_argument = has_value_receiver;
      for (SignatureStream ss(signature); !has_value_argument && !ss.at_return_type(); ss.next()) {
        if (ss.type() == T_VALUETYPE) {
          has_value_argument = true;
        }
      }
      if (has_value_argument) {
        // Get the extended signature from the callee's adapter through the attached method
        signature = callee->adapter()->get_sig_extended();
        assert(signature != NULL, "signature is null");
        has_receiver = false; // The extended signature contains the receiver type
      }
    }

    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
  }
#endif // !SHARK
}

// -----------------------------------------------------------------------------
// CompiledMethod::get_deopt_original_pc
//
// Return the original PC for the given PC if:
// (a) the given PC belongs to an nmethod and
// (b) it is a deopt PC
address CompiledMethod::get_deopt_original_pc(const frame* fr) {
  if (fr->cb() == NULL)  return NULL;

  CompiledMethod* cm = fr->cb()->as_compiled_method_or_null();
  if (cm != NULL && cm->is_deopt_pc(fr->pc()))
    return cm->get_original_pc(fr);

  return NULL;
}

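// Returns the Method* recorded on the call relocation at call_instr,
// or NULL if no method is attached there.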
Method* CompiledMethod::attached_method(address call_instr) {
  assert(code_contains(call_instr), "not part of the nmethod");
  RelocIterator iter(this, call_instr, call_instr + 1);
  while (iter.next()) {
    if (iter.addr() == call_instr) {
      switch (iter.type()) {
        case relocInfo::static_call_type:      return iter.static_call_reloc()->method_value();
        case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
        case relocInfo::virtual_call_type:     return iter.virtual_call_reloc()->method_value();
        default:                               break;
      }
    }
  }
  return NULL; // not found
}

Method* CompiledMethod::attached_method_before_pc(address pc) {
  if (NativeCall::is_call_before(pc)) {
    NativeCall* ncall = nativeCall_before(pc);
    return attached_method(ncall->instruction_address());
  }
  return NULL; // not a call
}

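// Clears all inline caches by visiting every relocation;
// clear_inline_cache() is a no-op for relocation types without one.
// Zombies are skipped, since their caches are already dead.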
void CompiledMethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
  if (is_zombie()) {
    return;
  }

  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}

// Clear ICStubs of all compiled ICs
void CompiledMethod::clear_ic_stubs() {
  assert_locked_or_safepoint(CompiledIC_lock);
  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(&iter);
      ic->clear_ic_stub();
    }
  }
}

#ifdef ASSERT

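// Debug-only helper: walks all metadata embedded in an nmethod and asserts
// that the class loader of each referenced Klass, Method or MethodData is alive.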
class CheckClass : AllStatic {
  static BoolObjectClosure* _is_alive;

  // Check that the class_loader is alive for this bit of metadata.
  static void check_class(Metadata* md) {
    Klass* klass = NULL;
    if (md->is_klass()) {
      klass = ((Klass*)md);
    } else if (md->is_method()) {
      klass = ((Method*)md)->method_holder();
    } else if (md->is_methodData()) {
      klass = ((MethodData*)md)->method()->method_holder();
    } else {
      md->print();
      ShouldNotReachHere();
    }
    assert(klass->is_loader_alive(_is_alive), "must be alive");
  }
 public:
  static void do_check_class(BoolObjectClosure* is_alive, CompiledMethod* nm) {
    assert(SafepointSynchronize::is_at_safepoint(), "this is only ok at safepoint");
    _is_alive = is_alive;
    nm->metadata_do(check_class);
  }
};

// This is called during a safepoint, so it can use static data.
BoolObjectClosure* CheckClass::_is_alive = NULL;
#endif // ASSERT


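// Cleans the inline cache if the metadata it caches is dead. For an ICHolder
// call the IC is kept only if both the holder method's class and the holder
// klass are still loader-alive; otherwise the cached Klass* or Method* itself
// is checked.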
void CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive) {
  if (ic->is_icholder_call()) {
    // The cached CompiledICHolder is not part of the nmethod's metadata,
    // so check the liveness of its holder method and klass directly here.
    CompiledICHolder* cichk_oop = ic->cached_icholder();

    if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
        cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
      return;
    }
  } else {
    Metadata* ic_oop = ic->cached_metadata();
    if (ic_oop != NULL) {
      if (ic_oop->is_klass()) {
        if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
          return;
        }
      } else if (ic_oop->is_method()) {
        if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
          return;
        }
      } else {
        ShouldNotReachHere();
      }
    }
  }

  ic->set_to_clean();
}

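// The global unloading clock tags each unloading cycle. An nmethod whose
// _unloading_clock does not match the global value has not yet been
// processed in the current cycle.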
unsigned char CompiledMethod::_global_unloading_clock = 0;

void CompiledMethod::increase_unloading_clock() {
  _global_unloading_clock++;
  if (_global_unloading_clock == 0) {
    // nmethods are allocated with _unloading_clock == 0,
    // so 0 is never used as a clock value.
    _global_unloading_clock = 1;
  }
}

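// The release-store below is paired with the load-acquire in unloading_clock()
// so that a thread observing the updated clock also observes the cleaning
// done before it was published.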
void CompiledMethod::set_unloading_clock(unsigned char unloading_clock) {
  OrderAccess::release_store((volatile jubyte*)&_unloading_clock, unloading_clock);
}

unsigned char CompiledMethod::unloading_clock() {
  return (unsigned char)OrderAccess::load_acquire((volatile jubyte*)&_unloading_clock);
}

// Processing of oop references should have been sufficient to keep
// all strong references alive.  Any weak references should have been
// cleared as well.  Visit all the metadata and ensure that it's
// really alive.
void CompiledMethod::verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive) {
#ifdef ASSERT
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    // static_stub_Relocations may have dangling references to
    // Method*s so trim them out here.  Otherwise it looks like
    // compiled code is maintaining a link to dead metadata.
    address static_call_addr = NULL;
    if (iter.type() == relocInfo::opt_virtual_call_type) {
      CompiledIC* cic = CompiledIC_at(&iter);
      if (!cic->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    } else if (iter.type() == relocInfo::static_call_type) {
      CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
      if (!csc->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    }
    if (static_call_addr != NULL) {
      RelocIterator sciter(this, low_boundary);
      while (sciter.next()) {
        if (sciter.type() == relocInfo::static_stub_type &&
            sciter.static_stub_reloc()->static_call() == static_call_addr) {
          sciter.static_stub_reloc()->clear_inline_cache();
        }
      }
    }
  }
  // Check that the metadata embedded in the nmethod is alive
  CheckClass::do_check_class(is_alive, this);
#endif
}

// This is called at the end of the strong tracing/marking phase of a
// GC to unload an nmethod if it contains otherwise unreachable
// oops.

void CompiledMethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  // Make sure the oops are ready to receive visitors
  assert(!is_zombie() && !is_unloaded(),
         "should not call follow on zombie or unloaded nmethod");

  // If the method is not entrant then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }

  // The RedefineClasses() API can cause the class unloading invariant
  // to no longer be true. See jvmtiExport.hpp for details.
  // Also, leave a debugging breadcrumb in local flag.
  if (JvmtiExport::has_redefined_a_class()) {
    // This set of the unloading_occurred flag is done before the
    // call to post_compiled_method_unload() so that the unloading
    // of this nmethod is reported.
    unloading_occurred = true;
  }

  // Exception cache
  clean_exception_cache(is_alive);

  // If class unloading occurred we first iterate over all inline caches and
  // clear ICs where the cached oop is referring to an unloaded klass or method.
  // The remaining live cached oops will be traversed in the relocInfo::oop_type
  // iteration below.
  if (unloading_occurred) {
    RelocIterator iter(this, low_boundary);
    while (iter.next()) {
      if (iter.type() == relocInfo::virtual_call_type) {
        CompiledIC *ic = CompiledIC_at(&iter);
        clean_ic_if_metadata_is_dead(ic, is_alive);
      }
    }
  }

  if (do_unloading_oops(low_boundary, is_alive, unloading_occurred)) {
    return;
  }

#if INCLUDE_JVMCI
  if (do_unloading_jvmci(is_alive, unloading_occurred)) {
    return;
  }
#endif

  // Ensure that all metadata is still alive
  verify_metadata_loaders(low_boundary, is_alive);
}

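// Shared helper for cleaning a compiled or static call whose destination is
// an nmethod that is unloading. Returns true if the destination nmethod has
// not yet been processed in this unloading cycle, so the caller must
// postpone the cleaning.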
template <class CompiledICorStaticCall>
static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, BoolObjectClosure *is_alive, CompiledMethod* from) {
  // It is OK to look up references to zombies here.
  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
  CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
  if (nm != NULL) {
    if (nm->unloading_clock() != CompiledMethod::global_unloading_clock()) {
      // The nmethod has not been processed yet.
      return true;
    }

    // Clean inline caches pointing to both zombie and not_entrant methods
    if (!nm->is_in_use() || (nm->method()->code() != nm)) {
      ic->set_to_clean();
      assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
    }
  }

  return false;
}

static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, BoolObjectClosure *is_alive, CompiledMethod* from) {
  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), is_alive, from);
}

static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, BoolObjectClosure *is_alive, CompiledMethod* from) {
  return clean_if_nmethod_is_unloaded(csc, csc->destination(), is_alive, from);
}

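// Parallel variant of do_unloading(). Returns true if some cleaning had to
// be postponed because the target nmethod had not been processed yet; the
// caller then runs do_unloading_parallel_postponed() as a second pass.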
bool CompiledMethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
  ResourceMark rm;

  // Make sure the oops are ready to receive visitors
  assert(!is_zombie() && !is_unloaded(),
         "should not call follow on zombie or unloaded nmethod");

  // If the method is not entrant then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }

  // The RedefineClasses() API can cause the class unloading invariant
  // to no longer be true. See jvmtiExport.hpp for details.
  // Also, leave a debugging breadcrumb in local flag.
  if (JvmtiExport::has_redefined_a_class()) {
    // This set of the unloading_occurred flag is done before the
    // call to post_compiled_method_unload() so that the unloading
    // of this nmethod is reported.
    unloading_occurred = true;
  }

  // Exception cache
  clean_exception_cache(is_alive);

  bool postponed = false;

  RelocIterator iter(this, low_boundary);
  while (iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first iterate over all inline caches and
        // clear ICs where the cached oop is referring to an unloaded klass or method.
        clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive);
      }

      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::opt_virtual_call_type:
      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::static_call_type:
      postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
      break;

    case relocInfo::oop_type:
      // handled by do_unloading_oops below
      break;

    case relocInfo::metadata_type:
      break; // nothing to do.

    default:
      break;
    }
  }

  if (do_unloading_oops(low_boundary, is_alive, unloading_occurred)) {
    return postponed;
  }

#if INCLUDE_JVMCI
  if (do_unloading_jvmci(is_alive, unloading_occurred)) {
    return postponed;
  }
#endif

  // Ensure that all metadata is still alive
  verify_metadata_loaders(low_boundary, is_alive);

  return postponed;
}

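// Second pass of parallel unloading: cleans the call sites whose cleaning
// was postponed because the destination nmethod had not been processed when
// do_unloading_parallel() first visited them.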
void CompiledMethod::do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred) {
  ResourceMark rm;

  // Make sure the oops are ready to receive visitors
  assert(!is_zombie(),
         "should not call follow on zombie nmethod");

  // If the method is not entrant then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }

  RelocIterator iter(this, low_boundary);
  while (iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::opt_virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::static_call_type:
      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
      break;

    default:
      break;
    }
  }
}