/*
 * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/compiledIC.hpp"
#include "code/scopeDesc.hpp"
#include "code/codeCache.hpp"
#include "prims/methodHandles.hpp"
#include "interpreter/bytecode.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/mutexLocker.hpp"

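// Both constructors delegate layout work to the CodeBlob constructor: the first
// takes a precomputed CodeBlobLayout, the second derives one from a CodeBuffer.
// Either way, the compiled Method* and the deoptimization mark are recorded here
// and the remaining flags are reset in init_defaults().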
CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
  _method(method), _mark_for_deoptimization_status(not_marked) {
  init_defaults();
}

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
  _method(method), _mark_for_deoptimization_status(not_marked) {
  init_defaults();
}

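// Reset the per-method flags to their cleared state; an unloading clock value
// of zero marks this method as not yet processed by any unloading cycle.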
void CompiledMethod::init_defaults() {
  _has_unsafe_access          = 0;
  _has_method_handle_invokes  = 0;
  _lazy_critical_native       = 0;
  _has_wide_vectors           = 0;
  _unloading_clock            = 0;
}

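// Returns true if return_pc is the return address of a call site that was
// compiled as a method handle invoke (only possible if this method contains
// any method handle invokes at all).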
bool CompiledMethod::is_method_handle_return(address return_pc) {
  if (!has_method_handle_invokes())  return false;
  PcDesc* pd = pc_desc_at(return_pc);
  if (pd == NULL)
    return false;
  return pd->is_method_handle_invoke();
}

// When using JVMCI the address might be off by the size of a call instruction.
bool CompiledMethod::is_deopt_entry(address pc) {
  return pc == deopt_handler_begin()
#if INCLUDE_JVMCI
    || (is_compiled_by_jvmci() && pc == (deopt_handler_begin() + NativeCall::instruction_size))
#endif
    ;
}

// Returns a string version of the method state.
const char* CompiledMethod::state() const {
  int state = get_state();
  switch (state) {
  case in_use:
    return "in use";
  case not_used:
    return "not_used";
  case not_entrant:
    return "not_entrant";
  case zombie:
    return "zombie";
  case unloaded:
    return "unloaded";
  default:
    fatal("unexpected method state: %d", state);
    return NULL;
  }
}

//-----------------------------------------------------------------------------

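// Prepend a new entry to the singly linked exception cache list. The head
// pointer is published with a release store so that readers, which never take
// the lock, observe a fully initialized entry.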
void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
  assert(new_entry != NULL,"Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

  ExceptionCache *ec = exception_cache();
  if (ec != NULL) {
    new_entry->set_next(ec);
  }
  release_set_exception_cache(new_entry);
}

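// Walk the exception cache and unlink entries whose exception klass belongs
// to a class loader that is no longer alive, deleting them as we go.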
void CompiledMethod::clean_exception_cache(BoolObjectClosure* is_alive) {
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    Klass* ex_klass = curr->exception_type();
    if (ex_klass != NULL && !ex_klass->is_loader_alive(is_alive)) {
      if (prev == NULL) {
        set_exception_cache(next);
      } else {
        prev->set_next(next);
      }
      delete curr;
      // prev stays the same.
    } else {
      prev = curr;
    }

    curr = next;
  }
}

// Public interface to the exception cache.
address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception, pc)) != NULL) {
      return ret_val;
    }
    ec = ec->next();
  }
  return NULL;
}

void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == NULL || !target_entry->add_address_and_handler(pc, handler)) {
    target_entry = new ExceptionCache(exception, pc, handler);
    add_exception_cache_entry(target_entry);
  }
}

//-------------end of code for ExceptionCache--------------

// Private helper used to manipulate the exception cache directly.
ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}

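// The two queries below scan the relocation info covering the single
// instruction at pc to decide whether it is a safepoint poll (and, for the
// first query, specifically a poll at a return).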
bool CompiledMethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}


bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}

void CompiledMethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
  RelocIterator iter(this, NULL, NULL);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}


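// Build a ScopeDesc describing the Java-level state at pc; the pc must have
// debug information (a PcDesc) recorded for it.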
ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

void CompiledMethod::cleanup_inline_caches(bool clean_all/*=false*/) {
  assert_locked_or_safepoint(CompiledIC_lock);

  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (!is_in_use() && is_nmethod()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }

  // Find all calls in an nmethod and clear the ones that point to non-entrant,
  // zombie and unloaded nmethods.
  ResourceMark rm;
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    switch (iter.type()) {
      case relocInfo::virtual_call_type:
      case relocInfo::opt_virtual_call_type: {
        CompiledIC *ic = CompiledIC_at(&iter);
        // OK to look up references to zombies here.
        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
        if (cb != NULL && cb->is_compiled()) {
          CompiledMethod* nm = cb->as_compiled_method();
          // Clean inline caches pointing to zombie, non-entrant and unloaded methods
          if (clean_all || !nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean(is_alive());
        }
        break;
      }
      case relocInfo::static_call_type: {
        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
        if (cb != NULL && cb->is_compiled()) {
          CompiledMethod* cm = cb->as_compiled_method();
          // Clean inline caches pointing to zombie, non-entrant and unloaded methods
          if (clean_all || !cm->is_in_use() || (cm->method()->code() != cm)) {
            csc->set_to_clean();
          }
        }
        break;
      }
      default:
        break;
    }
  }
}

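// Count the virtual call sites in this method whose inline cache goes through
// a CompiledICHolder, tracing them when TraceCompiledIC is enabled.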
int CompiledMethod::verify_icholder_relocations() {
  ResourceMark rm;
  int count = 0;

  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc(), this)) {
        CompiledIC *ic = CompiledIC_at(&iter);
        if (TraceCompiledIC) {
          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
          ic->print();
        }
        assert(ic->cached_icholder() != NULL, "must be non-NULL");
        count++;
      }
    }
  }

  return count;
}

// Method that knows how to preserve outgoing arguments at call. This method must be
// called with a frame corresponding to a Java invoke.
void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
#ifndef SHARK
  if (method() != NULL && !method()->is_native()) {
    address pc = fr.pc();
    SimpleScopeDesc ssd(this, pc);
    Bytecode_invoke call(ssd.method(), ssd.bci());
    bool has_receiver = call.has_receiver();
    bool has_appendix = call.has_appendix();
    Symbol* signature = call.signature();

    // The method attached by JIT-compilers should be used, if present.
    // Bytecode can be inaccurate in such a case.
    Method* callee = attached_method_before_pc(pc);
    if (callee != NULL) {
      has_receiver = !(callee->access_flags().is_static());
      has_appendix = false;
      signature = callee->signature();
    }

    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
  }
#endif // !SHARK
}

// -----------------------------------------------------------------------------
// CompiledMethod::get_deopt_original_pc
//
// Return the original PC for the given PC if:
// (a) the given PC belongs to an nmethod and
// (b) it is a deopt PC
address CompiledMethod::get_deopt_original_pc(const frame* fr) {
  if (fr->cb() == NULL)  return NULL;

  CompiledMethod* cm = fr->cb()->as_compiled_method_or_null();
  if (cm != NULL && cm->is_deopt_pc(fr->pc()))
    return cm->get_original_pc(fr);

  return NULL;
}

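// Look up the Method* that the compiler attached to the call instruction at
// call_instr, if any; attached_method_before_pc() is the variant used when
// only the return address (the pc following the call) is known.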
Method* CompiledMethod::attached_method(address call_instr) {
  assert(code_contains(call_instr), "not part of the nmethod");
  RelocIterator iter(this, call_instr, call_instr + 1);
  while (iter.next()) {
    if (iter.addr() == call_instr) {
      switch (iter.type()) {
        case relocInfo::static_call_type:      return iter.static_call_reloc()->method_value();
        case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
        case relocInfo::virtual_call_type:     return iter.virtual_call_reloc()->method_value();
        default:                               break;
      }
    }
  }
  return NULL; // not found
}

Method* CompiledMethod::attached_method_before_pc(address pc) {
  if (NativeCall::is_call_before(pc)) {
    NativeCall* ncall = nativeCall_before(pc);
    return attached_method(ncall->instruction_address());
  }
  return NULL; // not a call
}

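// Reset every inline cache in this method to its initial (clean) state.
// Requires a safepoint; zombie methods are skipped entirely.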
void CompiledMethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
  if (is_zombie()) {
    return;
  }

  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}

// Clear ICStubs of all compiled ICs
void CompiledMethod::clear_ic_stubs() {
  assert_locked_or_safepoint(CompiledIC_lock);
  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(&iter);
      ic->clear_ic_stub();
    }
  }
}

#ifdef ASSERT

class CheckClass : AllStatic {
  static BoolObjectClosure* _is_alive;

  // Check that the class loader is alive for this bit of metadata.
  static void check_class(Metadata* md) {
    Klass* klass = NULL;
    if (md->is_klass()) {
      klass = ((Klass*)md);
    } else if (md->is_method()) {
      klass = ((Method*)md)->method_holder();
    } else if (md->is_methodData()) {
      klass = ((MethodData*)md)->method()->method_holder();
    } else {
      md->print();
      ShouldNotReachHere();
    }
    assert(klass->is_loader_alive(_is_alive), "must be alive");
  }
 public:
  static void do_check_class(BoolObjectClosure* is_alive, CompiledMethod* nm) {
    assert(SafepointSynchronize::is_at_safepoint(), "this is only ok at safepoint");
    _is_alive = is_alive;
    nm->metadata_do(check_class);
  }
};

// This is called during a safepoint so can use static data
BoolObjectClosure* CheckClass::_is_alive = NULL;
#endif // ASSERT


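// Clean an inline cache whose cached metadata refers to an unloaded klass or
// method. ICs that go through a CompiledICHolder are checked via the holder's
// method and klass; plain metadata ICs are checked directly.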
void CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive) {
  if (ic->is_icholder_call()) {
    // The only exception is compiledICHolder oops which may
    // yet be marked below. (We check this further below).
    CompiledICHolder* cichk_oop = ic->cached_icholder();

    if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
        cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
      return;
    }
  } else {
    Metadata* ic_oop = ic->cached_metadata();
    if (ic_oop != NULL) {
      if (ic_oop->is_klass()) {
        if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
          return;
        }
      } else if (ic_oop->is_method()) {
        if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
          return;
        }
      } else {
        ShouldNotReachHere();
      }
    }
  }

  ic->set_to_clean();
}

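// Global "unloading clock": bumped once per unloading cycle so that each
// nmethod can record (in _unloading_clock) whether it has already been visited
// in the current cycle. The value 0 is reserved for freshly allocated methods.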
unsigned char CompiledMethod::_global_unloading_clock = 0;

void CompiledMethod::increase_unloading_clock() {
  _global_unloading_clock++;
  if (_global_unloading_clock == 0) {
    // _nmethods are allocated with _unloading_clock == 0,
    // so 0 is never used as a clock value.
    _global_unloading_clock = 1;
  }
}

void CompiledMethod::set_unloading_clock(unsigned char unloading_clock) {
  OrderAccess::release_store((volatile jubyte*)&_unloading_clock, unloading_clock);
}

unsigned char CompiledMethod::unloading_clock() {
  return (unsigned char)OrderAccess::load_acquire((volatile jubyte*)&_unloading_clock);
}

// Processing of oop references should have been sufficient to keep
// all strong references alive.  Any weak references should have been
// cleared as well.  Visit all the metadata and ensure that it's
// really alive.
void CompiledMethod::verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive) {
#ifdef ASSERT
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    // static_stub_Relocations may have dangling references to
    // Method*s so trim them out here.  Otherwise it looks like
    // compiled code is maintaining a link to dead metadata.
    address static_call_addr = NULL;
    if (iter.type() == relocInfo::opt_virtual_call_type) {
      CompiledIC* cic = CompiledIC_at(&iter);
      if (!cic->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    } else if (iter.type() == relocInfo::static_call_type) {
      CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
      if (!csc->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    }
    if (static_call_addr != NULL) {
      RelocIterator sciter(this, low_boundary);
      while (sciter.next()) {
        if (sciter.type() == relocInfo::static_stub_type &&
            sciter.static_stub_reloc()->static_call() == static_call_addr) {
          sciter.static_stub_reloc()->clear_inline_cache();
        }
      }
    }
  }
  // Check that the metadata embedded in the nmethod is alive
  CheckClass::do_check_class(is_alive, this);
#endif
}

// This is called at the end of the strong tracing/marking phase of a
// GC to unload an nmethod if it contains otherwise unreachable
// oops.

void CompiledMethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  // Make sure the oop's ready to receive visitors
  assert(!is_zombie() && !is_unloaded(),
         "should not call follow on zombie or unloaded nmethod");

  // If the method is not entrant then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }

  // The RedefineClasses() API can cause the class unloading invariant
  // to no longer be true. See jvmtiExport.hpp for details.
  // Also, leave a debugging breadcrumb in local flag.
  if (JvmtiExport::has_redefined_a_class()) {
    // This set of the unloading_occurred flag is done before the
    // call to post_compiled_method_unload() so that the unloading
    // of this nmethod is reported.
    unloading_occurred = true;
  }

  // Exception cache
  clean_exception_cache(is_alive);

  // If class unloading occurred we first iterate over all inline caches and
  // clear ICs where the cached oop is referring to an unloaded klass or method.
  // The remaining live cached oops will be traversed in the relocInfo::oop_type
  // iteration below.
  if (unloading_occurred) {
    RelocIterator iter(this, low_boundary);
    while (iter.next()) {
      if (iter.type() == relocInfo::virtual_call_type) {
        CompiledIC *ic = CompiledIC_at(&iter);
        clean_ic_if_metadata_is_dead(ic, is_alive);
      }
    }
  }

  if (do_unloading_oops(low_boundary, is_alive, unloading_occurred)) {
    return;
  }

#if INCLUDE_JVMCI
  if (do_unloading_jvmci(is_alive, unloading_occurred)) {
    return;
  }
#endif

  // Ensure that all metadata is still alive
  verify_metadata_loaders(low_boundary, is_alive);
}

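// Shared helper for the parallel unloading passes below: if the call target at
// addr resolves to another compiled method, either report that the callee has
// not been processed in this cycle yet (return true, so the caller postpones
// this call site) or clean the IC when the callee is no longer usable.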
template <class CompiledICorStaticCall>
static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, BoolObjectClosure *is_alive, CompiledMethod* from) {
  // OK to look up references to zombies here.
  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
  CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
  if (nm != NULL) {
    if (nm->unloading_clock() != CompiledMethod::global_unloading_clock()) {
      // The nmethod has not been processed yet.
      return true;
    }

    // Clean inline caches pointing to both zombie and not_entrant methods
    if (!nm->is_in_use() || (nm->method()->code() != nm)) {
      ic->set_to_clean();
      assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
    }
  }

  return false;
}

static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, BoolObjectClosure *is_alive, CompiledMethod* from) {
  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), is_alive, from);
}

static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, BoolObjectClosure *is_alive, CompiledMethod* from) {
  return clean_if_nmethod_is_unloaded(csc, csc->destination(), is_alive, from);
}

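// First pass of parallel nmethod unloading: clean what can be cleaned now and
// return true if any call site had to be postponed because its target has not
// been processed in this unloading cycle yet.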
bool CompiledMethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
  ResourceMark rm;

  // Make sure the oop's ready to receive visitors
  assert(!is_zombie() && !is_unloaded(),
         "should not call follow on zombie or unloaded nmethod");

  // If the method is not entrant then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }

  // The RedefineClasses() API can cause the class unloading invariant
  // to no longer be true. See jvmtiExport.hpp for details.
  // Also, leave a debugging breadcrumb in local flag.
  if (JvmtiExport::has_redefined_a_class()) {
    // This set of the unloading_occurred flag is done before the
    // call to post_compiled_method_unload() so that the unloading
    // of this nmethod is reported.
    unloading_occurred = true;
  }

  // Exception cache
  clean_exception_cache(is_alive);

  bool postponed = false;

  RelocIterator iter(this, low_boundary);
  while (iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first iterate over all inline caches and
        // clear ICs where the cached oop is referring to an unloaded klass or method.
        clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive);
      }

      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::opt_virtual_call_type:
      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::static_call_type:
      postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
      break;

    case relocInfo::oop_type:
      // handled by do_unloading_oops below
      break;

    case relocInfo::metadata_type:
      break; // nothing to do.

    default:
      break;
    }
  }

  if (do_unloading_oops(low_boundary, is_alive, unloading_occurred)) {
    return postponed;
  }

#if INCLUDE_JVMCI
  if (do_unloading_jvmci(is_alive, unloading_occurred)) {
    return postponed;
  }
#endif

  // Ensure that all metadata is still alive
  verify_metadata_loaders(low_boundary, is_alive);

  return postponed;
}

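// Second pass of parallel nmethod unloading: revisit the call sites that were
// postponed above (presumably run once every compiled method has been visited
// in the first pass) and clean them if their targets turned out to be unusable.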
void CompiledMethod::do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred) {
  ResourceMark rm;

  // Make sure the oop's ready to receive visitors
  assert(!is_zombie(),
         "should not call follow on zombie nmethod");

  // If the method is not entrant then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }

  RelocIterator iter(this, low_boundary);
  while (iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::opt_virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::static_call_type:
      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
      break;

    default:
      break;
    }
  }
}