/*
 * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/scopeDesc.hpp"
#include "interpreter/bytecode.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/mutexLocker.hpp"

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
  _method(method), _mark_for_deoptimization_status(not_marked) {
  init_defaults();
}

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
  _method(method), _mark_for_deoptimization_status(not_marked) {
  init_defaults();
}

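// Reset the per-method flags and the unloading clock to their default
// (zero) values; called from both constructors above.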
void CompiledMethod::init_defaults() {
  _has_unsafe_access          = 0;
  _has_method_handle_invokes  = 0;
  _lazy_critical_native       = 0;
  _has_wide_vectors           = 0;
  _unloading_clock            = 0;
}

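// Returns true if return_pc is the return address of a MethodHandle call site
// in this method, as recorded in the PcDesc table.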
bool CompiledMethod::is_method_handle_return(address return_pc) {
  if (!has_method_handle_invokes())  return false;
  PcDesc* pd = pc_desc_at(return_pc);
  if (pd == NULL)
    return false;
  return pd->is_method_handle_invoke();
}

// When using JVMCI the address might be off by the size of a call instruction.
bool CompiledMethod::is_deopt_entry(address pc) {
  return pc == deopt_handler_begin()
#if INCLUDE_JVMCI
    || (is_compiled_by_jvmci() && pc == (deopt_handler_begin() + NativeCall::instruction_size))
#endif
    ;
}

// Returns a string version of the method state.
const char* CompiledMethod::state() const {
  int state = get_state();
  switch (state) {
  case in_use:
    return "in use";
  case not_used:
    return "not_used";
  case not_entrant:
    return "not_entrant";
  case zombie:
    return "zombie";
  case unloaded:
    return "unloaded";
  default:
    fatal("unexpected method state: %d", state);
    return NULL;
  }
}

//-----------------------------------------------------------------------------

void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(), "Must hold the ExceptionCache_lock");
  assert(new_entry != NULL, "Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

  ExceptionCache *ec = exception_cache();
  if (ec != NULL) {
    new_entry->set_next(ec);
  }
  release_set_exception_cache(new_entry);
}

void CompiledMethod::clean_exception_cache(BoolObjectClosure* is_alive) {
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    Klass* ex_klass = curr->exception_type();
    if (ex_klass != NULL && !ex_klass->is_loader_alive(is_alive)) {
      if (prev == NULL) {
        set_exception_cache(next);
      } else {
        prev->set_next(next);
      }
      delete curr;
      // prev stays the same.
    } else {
      prev = curr;
    }

    curr = next;
  }
}

// These are the public access methods for the exception cache.
address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception, pc)) != NULL) {
      return ret_val;
    }
    ec = ec->next();
  }
  return NULL;
}

 145 
 146 void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
 147   // There are potential race conditions during exception cache updates, so we
 148   // must own the ExceptionCache_lock before doing ANY modifications. Because
 149   // we don't lock during reads, it is possible to have several threads attempt
 150   // to update the cache with the same data. We need to check for already inserted
 151   // copies of the current data before adding it.
 152 
 153   MutexLocker ml(ExceptionCache_lock);
 154   ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);
 155 
 156   if (target_entry == NULL || !target_entry->add_address_and_handler(pc,handler)) {
 157     target_entry = new ExceptionCache(exception,pc,handler);
 158     add_exception_cache_entry(target_entry);
 159   }
 160 }
 161 
//-------------end of code for ExceptionCache--------------

// This private method manipulates the exception cache directly.
ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}

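// The following two queries scan the relocation entries covering pc to decide
// whether it sits at a safepoint poll (return polls only, or any poll).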
bool CompiledMethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc + 1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}


bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc + 1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}

void CompiledMethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
  RelocIterator iter(this, NULL, NULL);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}

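// Builds a ScopeDesc for the Java-level state at pc; the pc must have an
// associated PcDesc entry (guaranteed below).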
ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

void CompiledMethod::cleanup_inline_caches(bool clean_all/*=false*/) {
  assert_locked_or_safepoint(CompiledIC_lock);

  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (!is_in_use() && is_nmethod()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }

  // Find all calls in an nmethod and clear the ones that point to non-entrant,
  // zombie and unloaded nmethods.
  ResourceMark rm;
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    switch (iter.type()) {
      case relocInfo::virtual_call_type:
      case relocInfo::opt_virtual_call_type: {
        CompiledIC *ic = CompiledIC_at(&iter);
        // OK to look up references to zombies here
        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
        if (cb != NULL && cb->is_compiled()) {
          CompiledMethod* nm = cb->as_compiled_method();
          // Clean inline caches pointing to zombie, non-entrant and unloaded methods
          if (clean_all || !nm->is_in_use() || (nm->method()->code() != nm)) {
            ic->set_to_clean(is_alive());
          }
        }
        break;
      }
      case relocInfo::static_call_type: {
        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
        if (cb != NULL && cb->is_compiled()) {
          CompiledMethod* cm = cb->as_compiled_method();
          // Clean inline caches pointing to zombie, non-entrant and unloaded methods
          if (clean_all || !cm->is_in_use() || (cm->method()->code() != cm)) {
            csc->set_to_clean();
          }
        }
        break;
      }
      default:
        break;
    }
  }
}

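// Counts the inline caches in this method that currently reference a
// CompiledICHolder, printing each one when TraceCompiledIC is enabled.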
int CompiledMethod::verify_icholder_relocations() {
  ResourceMark rm;
  int count = 0;

  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc(), this)) {
        CompiledIC *ic = CompiledIC_at(&iter);
        if (TraceCompiledIC) {
          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
          ic->print();
        }
        assert(ic->cached_icholder() != NULL, "must be non-NULL");
        count++;
      }
    }
  }

  return count;
}

// Method that knows how to preserve outgoing arguments at a call. This method
// must be called with a frame corresponding to a Java invoke.
void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
#ifndef SHARK
  if (method() != NULL && !method()->is_native()) {
    address pc = fr.pc();
    SimpleScopeDesc ssd(this, pc);
    Bytecode_invoke call(ssd.method(), ssd.bci());
    bool has_receiver = call.has_receiver();
    bool has_appendix = call.has_appendix();
    Symbol* signature = call.signature();

    // The method attached by JIT-compilers should be used, if present.
    // Bytecode can be inaccurate in that case.
    Method* callee = attached_method_before_pc(pc);
    if (callee != NULL) {
      has_receiver = !(callee->access_flags().is_static());
      has_appendix = false;
      signature = callee->signature();
    }

    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
  }
#endif // !SHARK
}

// -----------------------------------------------------------------------------
// CompiledMethod::get_deopt_original_pc
//
// Return the original PC for the given PC if:
// (a) the given PC belongs to an nmethod and
// (b) it is a deopt PC
address CompiledMethod::get_deopt_original_pc(const frame* fr) {
  if (fr->cb() == NULL)  return NULL;

  CompiledMethod* cm = fr->cb()->as_compiled_method_or_null();
  if (cm != NULL && cm->is_deopt_pc(fr->pc()))
    return cm->get_original_pc(fr);

  return NULL;
}

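// Returns the Method* recorded in the call relocation at call_instr, or NULL
// if the call site has no attached method.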
Method* CompiledMethod::attached_method(address call_instr) {
  assert(code_contains(call_instr), "not part of the nmethod");
  RelocIterator iter(this, call_instr, call_instr + 1);
  while (iter.next()) {
    if (iter.addr() == call_instr) {
      switch (iter.type()) {
        case relocInfo::static_call_type:      return iter.static_call_reloc()->method_value();
        case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
        case relocInfo::virtual_call_type:     return iter.virtual_call_reloc()->method_value();
        default:                               break;
      }
    }
  }
  return NULL; // not found
}

Method* CompiledMethod::attached_method_before_pc(address pc) {
  if (NativeCall::is_call_before(pc)) {
    NativeCall* ncall = nativeCall_before(pc);
    return attached_method(ncall->instruction_address());
  }
  return NULL; // not a call
}

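// Reset all inline caches in this method to their initial state. Only allowed
// at a safepoint; zombie methods are skipped.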
void CompiledMethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
  if (is_zombie()) {
    return;
  }

  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}

// Clear ICStubs of all compiled ICs
void CompiledMethod::clear_ic_stubs() {
  assert_locked_or_safepoint(CompiledIC_lock);
  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(&iter);
      ic->clear_ic_stub();
    }
  }
}

#ifdef ASSERT

class CheckClass : AllStatic {
  static BoolObjectClosure* _is_alive;

  // Check class_loader is alive for this bit of metadata.
  static void check_class(Metadata* md) {
    Klass* klass = NULL;
    if (md->is_klass()) {
      klass = ((Klass*)md);
    } else if (md->is_method()) {
      klass = ((Method*)md)->method_holder();
    } else if (md->is_methodData()) {
      klass = ((MethodData*)md)->method()->method_holder();
    } else {
      md->print();
      ShouldNotReachHere();
    }
    assert(klass->is_loader_alive(_is_alive), "must be alive");
  }
 public:
  static void do_check_class(BoolObjectClosure* is_alive, CompiledMethod* nm) {
    assert(SafepointSynchronize::is_at_safepoint(), "this is only ok at safepoint");
    _is_alive = is_alive;
    nm->metadata_do(check_class);
  }
};

// This is called during a safepoint so can use static data
BoolObjectClosure* CheckClass::_is_alive = NULL;
#endif // ASSERT


void CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive) {
  if (ic->is_icholder_call()) {
    // The only exception is compiledICHolder metadata, which may
    // yet be marked below. (We check this further below.)
    CompiledICHolder* cichk_oop = ic->cached_icholder();

    if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
        cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
      return;
    }
  } else {
    Metadata* ic_oop = ic->cached_metadata();
    if (ic_oop != NULL) {
      if (ic_oop->is_klass()) {
        if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
          return;
        }
      } else if (ic_oop->is_method()) {
        if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
          return;
        }
      } else {
        ShouldNotReachHere();
      }
    }
  }

  ic->set_to_clean();
}

unsigned char CompiledMethod::_global_unloading_clock = 0;

void CompiledMethod::increase_unloading_clock() {
  _global_unloading_clock++;
  if (_global_unloading_clock == 0) {
    // _nmethods are allocated with _unloading_clock == 0,
    // so 0 is never used as a clock value.
    _global_unloading_clock = 1;
  }
}

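// The per-method clock is written with release semantics and read with
// acquire semantics, so a reader that observes an up-to-date clock value
// also observes the stores made before the clock was set.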
void CompiledMethod::set_unloading_clock(unsigned char unloading_clock) {
  OrderAccess::release_store((volatile jubyte*)&_unloading_clock, unloading_clock);
}

unsigned char CompiledMethod::unloading_clock() {
  return (unsigned char)OrderAccess::load_acquire((volatile jubyte*)&_unloading_clock);
}

// Processing of oop references should have been sufficient to keep
// all strong references alive.  Any weak references should have been
// cleared as well.  Visit all the metadata and ensure that it's
// really alive.
void CompiledMethod::verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive) {
#ifdef ASSERT
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    // static_stub_Relocations may have dangling references to
    // Method*s so trim them out here.  Otherwise it looks like
    // compiled code is maintaining a link to dead metadata.
    address static_call_addr = NULL;
    if (iter.type() == relocInfo::opt_virtual_call_type) {
      CompiledIC* cic = CompiledIC_at(&iter);
      if (!cic->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    } else if (iter.type() == relocInfo::static_call_type) {
      CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
      if (!csc->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    }
    if (static_call_addr != NULL) {
      RelocIterator sciter(this, low_boundary);
      while (sciter.next()) {
        if (sciter.type() == relocInfo::static_stub_type &&
            sciter.static_stub_reloc()->static_call() == static_call_addr) {
          sciter.static_stub_reloc()->clear_inline_cache();
        }
      }
    }
  }
  // Check that the metadata embedded in the nmethod is alive
  CheckClass::do_check_class(is_alive, this);
#endif
}

// This is called at the end of the strong tracing/marking phase of a
// GC to unload an nmethod if it contains otherwise unreachable
// oops.

void CompiledMethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  // Make sure the oops are ready to receive visitors
  assert(!is_zombie() && !is_unloaded(),
         "should not call follow on zombie or unloaded nmethod");

  // If the method is not entrant then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }

  // The RedefineClasses() API can cause the class unloading invariant
  // to no longer be true. See jvmtiExport.hpp for details.
  // Also, leave a debugging breadcrumb in local flag.
  if (JvmtiExport::has_redefined_a_class()) {
    // This set of the unloading_occurred flag is done before the
    // call to post_compiled_method_unload() so that the unloading
    // of this nmethod is reported.
    unloading_occurred = true;
  }

  // Exception cache
  clean_exception_cache(is_alive);

  // If class unloading occurred we first iterate over all inline caches and
  // clear ICs where the cached oop is referring to an unloaded klass or method.
  // The remaining live cached oops will be traversed in the relocInfo::oop_type
  // iteration below.
  if (unloading_occurred) {
    RelocIterator iter(this, low_boundary);
    while (iter.next()) {
      if (iter.type() == relocInfo::virtual_call_type) {
        CompiledIC *ic = CompiledIC_at(&iter);
        clean_ic_if_metadata_is_dead(ic, is_alive);
      }
    }
  }

  if (do_unloading_oops(low_boundary, is_alive, unloading_occurred)) {
    return;
  }

#if INCLUDE_JVMCI
  if (do_unloading_jvmci(is_alive, unloading_occurred)) {
    return;
  }
#endif

  // Ensure that all metadata is still alive
  verify_metadata_loaders(low_boundary, is_alive);
}

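// Cleans the given call site if it points into an nmethod that is no longer
// valid. Returns true if cleaning had to be postponed because the target
// nmethod has not yet been processed in the current unloading cycle.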
template <class CompiledICorStaticCall>
static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, BoolObjectClosure *is_alive, CompiledMethod* from) {
  // OK to look up references to zombies here
  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
  CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
  if (nm != NULL) {
    if (nm->unloading_clock() != CompiledMethod::global_unloading_clock()) {
      // The nmethod has not been processed yet.
      return true;
    }

    // Clean inline caches pointing to both zombie and not_entrant methods
    if (!nm->is_in_use() || (nm->method()->code() != nm)) {
      ic->set_to_clean();
      assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
    }
  }

  return false;
}

static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, BoolObjectClosure *is_alive, CompiledMethod* from) {
  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), is_alive, from);
}

static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, BoolObjectClosure *is_alive, CompiledMethod* from) {
  return clean_if_nmethod_is_unloaded(csc, csc->destination(), is_alive, from);
}

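// Parallel variant of do_unloading(): performs the same cleaning, but call
// sites whose target has not been processed in this cycle are postponed and
// the method returns true so the caller can revisit them in a second pass.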
bool CompiledMethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
  ResourceMark rm;

  // Make sure the oops are ready to receive visitors
  assert(!is_zombie() && !is_unloaded(),
         "should not call follow on zombie or unloaded nmethod");

  // If the method is not entrant then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }

  // The RedefineClasses() API can cause the class unloading invariant
  // to no longer be true. See jvmtiExport.hpp for details.
  // Also, leave a debugging breadcrumb in local flag.
  if (JvmtiExport::has_redefined_a_class()) {
    // This set of the unloading_occurred flag is done before the
    // call to post_compiled_method_unload() so that the unloading
    // of this nmethod is reported.
    unloading_occurred = true;
  }

  // Exception cache
  clean_exception_cache(is_alive);

  bool postponed = false;

  RelocIterator iter(this, low_boundary);
  while (iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first iterate over all inline caches and
        // clear ICs where the cached oop is referring to an unloaded klass or method.
        clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive);
      }

      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::opt_virtual_call_type:
      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::static_call_type:
      postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
      break;

    case relocInfo::oop_type:
      // handled by do_unloading_oops below
      break;

    case relocInfo::metadata_type:
      break; // nothing to do.

    default:
      break;
    }
  }

  if (do_unloading_oops(low_boundary, is_alive, unloading_occurred)) {
    return postponed;
  }

#if INCLUDE_JVMCI
  if (do_unloading_jvmci(is_alive, unloading_occurred)) {
    return postponed;
  }
#endif

  // Ensure that all metadata is still alive
  verify_metadata_loaders(low_boundary, is_alive);

  return postponed;
}

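// Second pass of parallel unloading: revisits the call sites that
// do_unloading_parallel() postponed, once every nmethod has been processed
// and the unloading clocks are up to date.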
void CompiledMethod::do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred) {
  ResourceMark rm;

  // Make sure the oops are ready to receive visitors
  assert(!is_zombie(),
         "should not call follow on zombie nmethod");

  // If the method is not entrant then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }

  RelocIterator iter(this, low_boundary);
  while (iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::opt_virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::static_call_type:
      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
      break;

    default:
      break;
    }
  }
}