/*
 * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/compiledIC.hpp"
#include "code/scopeDesc.hpp"
#include "code/codeCache.hpp"
#include "prims/methodHandles.hpp"
#include "interpreter/bytecode.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/mutexLocker.hpp"

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
  _method(method), _mark_for_deoptimization_status(not_marked) {
  init_defaults();
}

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
  _method(method), _mark_for_deoptimization_status(not_marked) {
  init_defaults();
}

void CompiledMethod::init_defaults() {
  _has_unsafe_access          = 0;
  _has_method_handle_invokes  = 0;
  _lazy_critical_native       = 0;
  _has_wide_vectors           = 0;
  _unloading_clock            = 0;
}

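// Returns true if return_pc is the return address of a call site that
// invokes a MethodHandle, as recorded in this method's PcDescs.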
bool CompiledMethod::is_method_handle_return(address return_pc) {
  if (!has_method_handle_invokes())  return false;
  PcDesc* pd = pc_desc_at(return_pc);
  if (pd == NULL)
    return false;
  return pd->is_method_handle_invoke();
}

// Returns a string version of the method state.
const char* CompiledMethod::state() const {
  int state = get_state();
  switch (state) {
  case in_use:
    return "in use";
  case not_used:
    return "not_used";
  case not_entrant:
    return "not_entrant";
  case zombie:
    return "zombie";
  case unloaded:
    return "unloaded";
  default:
    fatal("unexpected method state: %d", state);
    return NULL;
  }
}

//-----------------------------------------------------------------------------

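// Prepend new_entry to the exception cache list. The release store ensures
// that a reader who sees the new head also sees its initialized fields.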
void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(), "Must hold the ExceptionCache_lock");
  assert(new_entry != NULL, "Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

  ExceptionCache *ec = exception_cache();
  if (ec != NULL) {
    new_entry->set_next(ec);
  }
  release_set_exception_cache(new_entry);
}

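// Remove exception cache entries whose exception Klass belongs to a class
// loader that is no longer alive; called during GC.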
void CompiledMethod::clean_exception_cache(BoolObjectClosure* is_alive) {
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    Klass* ex_klass = curr->exception_type();
    if (ex_klass != NULL && !ex_klass->is_loader_alive(is_alive)) {
      if (prev == NULL) {
        set_exception_cache(next);
      } else {
        prev->set_next(next);
      }
      delete curr;
      // prev stays the same.
    } else {
      prev = curr;
    }

    curr = next;
  }
}

// Public methods for accessing the exception cache.
address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception, pc)) != NULL) {
      return ret_val;
    }
    ec = ec->next();
  }
  return NULL;
}

void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == NULL || !target_entry->add_address_and_handler(pc, handler)) {
    target_entry = new ExceptionCache(exception, pc, handler);
    add_exception_cache_entry(target_entry);
  }
}

//-------------end of code for ExceptionCache--------------

// Private method used to manipulate the exception cache directly.
ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}

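// Returns true if pc is at a safepoint poll-return relocation.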
bool CompiledMethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}

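// Returns true if pc is at either a safepoint poll or a poll-return relocation.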
bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}

void CompiledMethod::verify_oop_relocations() {
  // Make sure that the code matches the current oop values
  RelocIterator iter(this, NULL, NULL);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}

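// Returns the ScopeDesc describing the Java state at the given pc.
// The pc must have an associated PcDesc.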
ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

void CompiledMethod::cleanup_inline_caches(bool clean_all/*=false*/) {
  assert_locked_or_safepoint(CompiledIC_lock);

  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (!is_in_use() && is_nmethod()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }

  // Find all calls in an nmethod and clear the ones that point to non-entrant,
  // zombie and unloaded nmethods.
  ResourceMark rm;
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    switch (iter.type()) {
      case relocInfo::virtual_call_type:
      case relocInfo::opt_virtual_call_type: {
        CompiledIC *ic = CompiledIC_at(&iter);
        // OK to look up references to zombies here
        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
        if (cb != NULL && cb->is_compiled()) {
          CompiledMethod* nm = cb->as_compiled_method();
          // Clean inline caches pointing to zombie, non-entrant and unloaded methods
          if (clean_all || !nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean(is_alive());
        }
        break;
      }
      case relocInfo::static_call_type: {
        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
        if (cb != NULL && cb->is_compiled()) {
          CompiledMethod* cm = cb->as_compiled_method();
          // Clean inline caches pointing to zombie, non-entrant and unloaded methods
          if (clean_all || !cm->is_in_use() || (cm->method()->code() != cm)) {
            csc->set_to_clean();
          }
        }
        break;
      }
      default:
        break;
    }
  }
}

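// Returns the number of virtual call sites whose inline cache currently goes
// through a CompiledICHolder; with TraceCompiledIC the sites are printed.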
int CompiledMethod::verify_icholder_relocations() {
  ResourceMark rm;
  int count = 0;

  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc(), this)) {
        CompiledIC *ic = CompiledIC_at(&iter);
        if (TraceCompiledIC) {
          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
          ic->print();
        }
        assert(ic->cached_icholder() != NULL, "must be non-NULL");
        count++;
      }
    }
  }

  return count;
}

// Method that knows how to preserve outgoing arguments at call. This method must be
// called with a frame corresponding to a Java invoke.
void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
#ifndef SHARK
  if (method() != NULL && !method()->is_native()) {
    address pc = fr.pc();
    SimpleScopeDesc ssd(this, pc);
    Bytecode_invoke call(ssd.method(), ssd.bci());
    bool has_receiver = call.has_receiver();
    bool has_appendix = call.has_appendix();
    Symbol* signature = call.signature();

    // The method attached by JIT-compilers should be used, if present.
    // Bytecode can be inaccurate in such case.
    Method* callee = attached_method_before_pc(pc);
    if (callee != NULL) {
      has_receiver = !(callee->access_flags().is_static());
      has_appendix = false;
      signature = callee->signature();
    }

    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
  }
#endif // !SHARK
}

// -----------------------------------------------------------------------------
// CompiledMethod::get_deopt_original_pc
//
// Return the original PC for the given PC if:
// (a) the given PC belongs to a nmethod and
// (b) it is a deopt PC
address CompiledMethod::get_deopt_original_pc(const frame* fr) {
  if (fr->cb() == NULL)  return NULL;

  nmethod* nm = fr->cb()->as_nmethod_or_null();
  if (nm != NULL && nm->is_deopt_pc(fr->pc()))
    return nm->get_original_pc(fr);

  return NULL;
}

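// Returns the Method* that the compiler attached to the call site at
// call_instr, or NULL if no method is recorded there.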
Method* CompiledMethod::attached_method(address call_instr) {
  assert(code_contains(call_instr), "not part of the nmethod");
  RelocIterator iter(this, call_instr, call_instr + 1);
  while (iter.next()) {
    if (iter.addr() == call_instr) {
      switch (iter.type()) {
        case relocInfo::static_call_type:      return iter.static_call_reloc()->method_value();
        case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
        case relocInfo::virtual_call_type:     return iter.virtual_call_reloc()->method_value();
        default:                               break;
      }
    }
  }
  return NULL; // not found
}

Method* CompiledMethod::attached_method_before_pc(address pc) {
  if (NativeCall::is_call_before(pc)) {
    NativeCall* ncall = nativeCall_before(pc);
    return attached_method(ncall->instruction_address());
  }
  return NULL; // not a call
}

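// Clear all inline caches of this method. Only allowed at a safepoint;
// zombie methods are skipped.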
void CompiledMethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
  if (is_zombie()) {
    return;
  }

  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}

// Clear ICStubs of all compiled ICs
void CompiledMethod::clear_ic_stubs() {
  assert_locked_or_safepoint(CompiledIC_lock);
  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(&iter);
      ic->clear_ic_stub();
    }
  }
}

#ifdef ASSERT

class CheckClass : AllStatic {
  static BoolObjectClosure* _is_alive;

  // Check class_loader is alive for this bit of metadata.
  static void check_class(Metadata* md) {
    Klass* klass = NULL;
    if (md->is_klass()) {
      klass = ((Klass*)md);
    } else if (md->is_method()) {
      klass = ((Method*)md)->method_holder();
    } else if (md->is_methodData()) {
      klass = ((MethodData*)md)->method()->method_holder();
    } else {
      md->print();
      ShouldNotReachHere();
    }
    assert(klass->is_loader_alive(_is_alive), "must be alive");
  }
 public:
  static void do_check_class(BoolObjectClosure* is_alive, CompiledMethod* nm) {
    assert(SafepointSynchronize::is_at_safepoint(), "this is only ok at safepoint");
    _is_alive = is_alive;
    nm->metadata_do(check_class);
  }
};

// This is called during a safepoint so can use static data
BoolObjectClosure* CheckClass::_is_alive = NULL;
#endif // ASSERT

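// Clean ic if the metadata it caches (a CompiledICHolder, Klass or Method)
// refers to a class loader that is no longer alive.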
void CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive) {
  if (ic->is_icholder_call()) {
    // The only exception is compiledICHolder metadata, which may
    // yet be marked below. (We check this further below.)
    CompiledICHolder* cichk_oop = ic->cached_icholder();

    if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
        cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
      return;
    }
  } else {
    Metadata* ic_oop = ic->cached_metadata();
    if (ic_oop != NULL) {
      if (ic_oop->is_klass()) {
        if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
          return;
        }
      } else if (ic_oop->is_method()) {
        if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
          return;
        }
      } else {
        ShouldNotReachHere();
      }
    }
  }

  ic->set_to_clean();
}

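// Global clock used by parallel code cache unloading to stamp nmethods as
// they are processed; an nmethod whose local clock differs from the global
// value has not yet been visited in the current unloading cycle.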
unsigned char CompiledMethod::_global_unloading_clock = 0;

void CompiledMethod::increase_unloading_clock() {
  _global_unloading_clock++;
  if (_global_unloading_clock == 0) {
    // _nmethods are allocated with _unloading_clock == 0,
    // so 0 is never used as a clock value.
    _global_unloading_clock = 1;
  }
}

void CompiledMethod::set_unloading_clock(unsigned char unloading_clock) {
  OrderAccess::release_store((volatile jubyte*)&_unloading_clock, unloading_clock);
}

unsigned char CompiledMethod::unloading_clock() {
  return (unsigned char)OrderAccess::load_acquire((volatile jubyte*)&_unloading_clock);
}


// Processing of oop references should have been sufficient to keep
// all strong references alive.  Any weak references should have been
// cleared as well.  Visit all the metadata and ensure that it's
// really alive.
void CompiledMethod::verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive) {
#ifdef ASSERT
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    // static_stub_Relocations may have dangling references to
    // Method*s so trim them out here.  Otherwise it looks like
    // compiled code is maintaining a link to dead metadata.
    address static_call_addr = NULL;
    if (iter.type() == relocInfo::opt_virtual_call_type) {
      CompiledIC* cic = CompiledIC_at(&iter);
      if (!cic->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    } else if (iter.type() == relocInfo::static_call_type) {
      CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
      if (!csc->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    }
    if (static_call_addr != NULL) {
      RelocIterator sciter(this, low_boundary);
      while (sciter.next()) {
        if (sciter.type() == relocInfo::static_stub_type &&
            sciter.static_stub_reloc()->static_call() == static_call_addr) {
          sciter.static_stub_reloc()->clear_inline_cache();
        }
      }
    }
  }
  // Check that the metadata embedded in the nmethod is alive
  CheckClass::do_check_class(is_alive, this);
#endif
}

// This is called at the end of the strong tracing/marking phase of a
// GC to unload an nmethod if it contains otherwise unreachable
// oops.

void CompiledMethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  // Make sure the oops are ready to receive visitors
  assert(!is_zombie() && !is_unloaded(),
         "should not call follow on zombie or unloaded nmethod");

  // If the method is not entrant then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }

  // The RedefineClasses() API can cause the class unloading invariant
  // to no longer be true. See jvmtiExport.hpp for details.
  // Also, leave a debugging breadcrumb in local flag.
  if (JvmtiExport::has_redefined_a_class()) {
    // This set of the unloading_occurred flag is done before the
    // call to post_compiled_method_unload() so that the unloading
    // of this nmethod is reported.
    unloading_occurred = true;
  }

  // Exception cache
  clean_exception_cache(is_alive);

  // If class unloading occurred we first iterate over all inline caches and
  // clear ICs where the cached oop is referring to an unloaded klass or method.
  // The remaining live cached oops will be traversed in the relocInfo::oop_type
  // iteration below.
  if (unloading_occurred) {
    RelocIterator iter(this, low_boundary);
    while (iter.next()) {
      if (iter.type() == relocInfo::virtual_call_type) {
        CompiledIC *ic = CompiledIC_at(&iter);
        clean_ic_if_metadata_is_dead(ic, is_alive);
      }
    }
  }

  if (do_unloading_oops(low_boundary, is_alive, unloading_occurred)) {
    return;
  }

#if INCLUDE_JVMCI
  if (do_unloading_jvmci(is_alive, unloading_occurred)) {
    return;
  }
#endif

  // Ensure that all metadata is still alive
  verify_metadata_loaders(low_boundary, is_alive);
}

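// Clean an inline cache or static call whose destination addr points into an
// nmethod that is no longer in use (zombie, not entrant, or replaced).
// Returns true if cleaning must be postponed because the target nmethod has
// not yet been stamped with the current unloading clock.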
template <class CompiledICorStaticCall>
static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, BoolObjectClosure *is_alive, CompiledMethod* from) {
  // OK to look up references to zombies here
  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
  CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
  if (nm != NULL) {
    if (nm->unloading_clock() != CompiledMethod::global_unloading_clock()) {
      // The nmethod has not been processed yet.
      return true;
    }

    // Clean inline caches pointing to both zombie and not_entrant methods
    if (!nm->is_in_use() || (nm->method()->code() != nm)) {
      ic->set_to_clean();
      assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
    }
  }

  return false;
}

static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, BoolObjectClosure *is_alive, CompiledMethod* from) {
  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), is_alive, from);
}

static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, BoolObjectClosure *is_alive, CompiledMethod* from) {
  return clean_if_nmethod_is_unloaded(csc, csc->destination(), is_alive, from);
}

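// Parallel counterpart of do_unloading(): cleans this nmethod's caches and
// returns true if some work had to be postponed because a target nmethod had
// not yet been stamped with the current unloading clock; such call sites are
// revisited in do_unloading_parallel_postponed().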
bool CompiledMethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
  ResourceMark rm;

  // Make sure the oops are ready to receive visitors
  assert(!is_zombie() && !is_unloaded(),
         "should not call follow on zombie or unloaded nmethod");

  // If the method is not entrant then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }

  // The RedefineClasses() API can cause the class unloading invariant
  // to no longer be true. See jvmtiExport.hpp for details.
  // Also, leave a debugging breadcrumb in local flag.
  if (JvmtiExport::has_redefined_a_class()) {
    // This set of the unloading_occurred flag is done before the
    // call to post_compiled_method_unload() so that the unloading
    // of this nmethod is reported.
    unloading_occurred = true;
  }

  // Exception cache
  clean_exception_cache(is_alive);

  bool postponed = false;

  RelocIterator iter(this, low_boundary);
  while (iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first iterate over all inline caches and
        // clear ICs where the cached oop is referring to an unloaded klass or method.
        clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive);
      }

      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::opt_virtual_call_type:
      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::static_call_type:
      postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
      break;

    case relocInfo::oop_type:
      // handled by do_unloading_oops below
      break;

    case relocInfo::metadata_type:
      break; // nothing to do.

    default:
      break;
    }
  }

  if (do_unloading_oops(low_boundary, is_alive, unloading_occurred)) {
    return postponed;
  }

#if INCLUDE_JVMCI
  if (do_unloading_jvmci(is_alive, unloading_occurred)) {
    return postponed;
  }
#endif

  // Ensure that all metadata is still alive
  verify_metadata_loaders(low_boundary, is_alive);

  return postponed;
}

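// Second, postponed pass of parallel unloading: revisit the inline caches
// that could not be cleaned in the first pass because their target nmethod
// had not been processed yet.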
void CompiledMethod::do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred) {
  ResourceMark rm;

  // Make sure the oops are ready to receive visitors
  assert(!is_zombie(),
         "should not call follow on zombie nmethod");

  // If the method is not entrant then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }

  RelocIterator iter(this, low_boundary);
  while (iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::opt_virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::static_call_type:
      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
      break;

    default:
      break;
    }
  }
}