/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/compiledIC.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/scopeDesc.hpp"
#include "code/codeCache.hpp"
#include "interpreter/bytecode.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
  _method(method), _mark_for_deoptimization_status(not_marked) {
  init_defaults();
}

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
  _method(method), _mark_for_deoptimization_status(not_marked) {
  init_defaults();
}

void CompiledMethod::init_defaults() {
  _has_unsafe_access          = 0;
  _has_method_handle_invokes  = 0;
  _lazy_critical_native       = 0;
  _has_wide_vectors           = 0;
  _unloading_clock            = 0;
}

bool CompiledMethod::is_method_handle_return(address return_pc) {
  if (!has_method_handle_invokes())  return false;
  PcDesc* pd = pc_desc_at(return_pc);
  if (pd == NULL)
    return false;
  return pd->is_method_handle_invoke();
}

// Returns a string version of the method state.
const char* CompiledMethod::state() const {
  int state = get_state();
  switch (state) {
  case not_installed:
    return "not installed";
  case in_use:
    return "in use";
  case not_used:
    return "not_used";
  case not_entrant:
    return "not_entrant";
  case zombie:
    return "zombie";
  case unloaded:
    return "unloaded";
  default:
    fatal("unexpected method state: %d", state);
    return NULL;
  }
}

//-----------------------------------------------------------------------------

void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(), "Must hold the ExceptionCache_lock");
  assert(new_entry != NULL, "Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

  ExceptionCache *ec = exception_cache();
  if (ec != NULL) {
    new_entry->set_next(ec);
  }
  release_set_exception_cache(new_entry);
}
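// The release store above publishes the fully-initialized entry: the new
// entry's next pointer is set before the cache head is updated, so the
// lock-free readers below always observe a consistent list.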

void CompiledMethod::clean_exception_cache(BoolObjectClosure* is_alive) {
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    Klass* ex_klass = curr->exception_type();
    if (ex_klass != NULL && !ex_klass->is_loader_alive(is_alive)) {
      if (prev == NULL) {
        set_exception_cache(next);
      } else {
        prev->set_next(next);
      }
      delete curr;
      // prev stays the same.
    } else {
      prev = curr;
    }

    curr = next;
  }
}

// These are the public access methods for the exception cache.
address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception, pc)) != NULL) {
      return ret_val;
    }
    ec = ec->next();
  }
  return NULL;
}
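// Illustrative use from the exception-dispatch path (a sketch only; the real
// caller and its slow-path helper live in SharedRuntime):
//
//   address handler = cm->handler_for_exception_and_pc(exception, pc);
//   if (handler == NULL) {
//     handler = /* compute handler via the slow path */;
//     cm->add_handler_for_exception_and_pc(exception, pc, handler);
//   }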

void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == NULL || !target_entry->add_address_and_handler(pc, handler)) {
    target_entry = new ExceptionCache(exception, pc, handler);
    add_exception_cache_entry(target_entry);
  }
}
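// The cache thus tolerates concurrent lock-free readers: writers serialize on
// ExceptionCache_lock, while entries are only unlinked by
// clean_exception_cache() during GC, so a racing reader never observes a
// freed entry.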

//-------------end of code for ExceptionCache--------------

// These methods are private and manipulate the exception cache
// directly.
ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}

bool CompiledMethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}


bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}
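// Both queries above bound the RelocIterator to the half-open range
// [pc, pc + 1), so only relocation entries attached to exactly this pc are
// examined.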

void CompiledMethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
  RelocIterator iter(this, NULL, NULL);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}


ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

ScopeDesc* CompiledMethod::scope_desc_near(address pc) {
  PcDesc* pd = pc_desc_near(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}
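// ScopeDesc is resource-allocated, so callers are expected to provide a
// ResourceMark. A minimal sketch of typical use:
//
//   ResourceMark rm;
//   ScopeDesc* sd = cm->scope_desc_at(pc);
//   Method* m = sd->method();   // innermost method at this pc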

void CompiledMethod::cleanup_inline_caches(bool clean_all/*=false*/) {
  assert_locked_or_safepoint(CompiledIC_lock);

  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (!is_in_use() && is_nmethod()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }

  // Find all calls in an nmethod and clear the ones that point to non-entrant,
  // zombie and unloaded nmethods.
  ResourceMark rm;
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    switch (iter.type()) {
      case relocInfo::virtual_call_type:
      case relocInfo::opt_virtual_call_type: {
        CompiledIC *ic = CompiledIC_at(&iter);
        // It is OK to look up references to zombies here
        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
        if (cb != NULL && cb->is_compiled()) {
          CompiledMethod* nm = cb->as_compiled_method();
          // Clean inline caches pointing to zombie, non-entrant and unloaded methods
          if (clean_all || !nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean(is_alive());
        }
        break;
      }
      case relocInfo::static_call_type: {
        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
        if (cb != NULL && cb->is_compiled()) {
          CompiledMethod* cm = cb->as_compiled_method();
          // Clean inline caches pointing to zombie, non-entrant and unloaded methods
          if (clean_all || !cm->is_in_use() || (cm->method()->code() != cm)) {
            csc->set_to_clean();
          }
        }
        break;
      }
      default:
        break;
    }
  }
}

int CompiledMethod::verify_icholder_relocations() {
  ResourceMark rm;
  int count = 0;

  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc(), this)) {
        CompiledIC *ic = CompiledIC_at(&iter);
        if (TraceCompiledIC) {
          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
          ic->print();
        }
        assert(ic->cached_icholder() != NULL, "must be non-NULL");
        count++;
      }
    }
  }

  return count;
}

// Preserves the outgoing arguments at a call site. This method must be
// called with a frame corresponding to a Java invoke.
void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
  if (method() != NULL && !method()->is_native()) {
    address pc = fr.pc();
    SimpleScopeDesc ssd(this, pc);
    Bytecode_invoke call(ssd.method(), ssd.bci());
    bool has_receiver = call.has_receiver();
    bool has_appendix = call.has_appendix();
    Symbol* signature = call.signature();

    // The method attached by JIT compilers should be used, if present.
    // Bytecode can be inaccurate in that case.
    Method* callee = attached_method_before_pc(pc);
    if (callee != NULL) {
      has_receiver = !(callee->access_flags().is_static());
      has_appendix = false;
      signature = callee->signature();
    }

    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
  }
}

Method* CompiledMethod::attached_method(address call_instr) {
  assert(code_contains(call_instr), "not part of the nmethod");
  RelocIterator iter(this, call_instr, call_instr + 1);
  while (iter.next()) {
    if (iter.addr() == call_instr) {
      switch (iter.type()) {
        case relocInfo::static_call_type:      return iter.static_call_reloc()->method_value();
        case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
        case relocInfo::virtual_call_type:     return iter.virtual_call_reloc()->method_value();
        default:                               break;
      }
    }
  }
  return NULL; // not found
}

Method* CompiledMethod::attached_method_before_pc(address pc) {
  if (NativeCall::is_call_before(pc)) {
    NativeCall* ncall = nativeCall_before(pc);
    return attached_method(ncall->instruction_address());
  }
  return NULL; // not a call
}
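// Illustrative: given a frame's return pc, recover the statically attached
// callee, if the call site recorded one (a sketch; see
// preserve_callee_argument_oops above for the real caller):
//
//   Method* callee = cm->attached_method_before_pc(fr.pc());
//   if (callee != NULL) {
//     // Trust callee->signature() over the invoke bytecode.
//   }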

void CompiledMethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
  if (is_zombie()) {
    return;
  }

  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}

// Clear ICStubs of all compiled ICs
void CompiledMethod::clear_ic_stubs() {
  assert_locked_or_safepoint(CompiledIC_lock);
  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(&iter);
      ic->clear_ic_stub();
    }
  }
}

#ifdef ASSERT

class CheckClass : AllStatic {
  static BoolObjectClosure* _is_alive;

  // Check class_loader is alive for this bit of metadata.
  static void check_class(Metadata* md) {
    Klass* klass = NULL;
    if (md->is_klass()) {
      klass = ((Klass*)md);
    } else if (md->is_method()) {
      klass = ((Method*)md)->method_holder();
    } else if (md->is_methodData()) {
      klass = ((MethodData*)md)->method()->method_holder();
    } else {
      md->print();
      ShouldNotReachHere();
    }
    assert(klass->is_loader_alive(_is_alive), "must be alive");
  }
 public:
  static void do_check_class(BoolObjectClosure* is_alive, CompiledMethod* nm) {
    assert(SafepointSynchronize::is_at_safepoint(), "this is only ok at safepoint");
    _is_alive = is_alive;
    nm->metadata_do(check_class);
  }
};

// This is called during a safepoint so can use static data
BoolObjectClosure* CheckClass::_is_alive = NULL;
#endif // ASSERT


void CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive) {
  if (ic->is_icholder_call()) {
    // The only exception is compiledICHolder oops, which may
    // yet be marked below. (We check this further below.)
    CompiledICHolder* cichk_oop = ic->cached_icholder();

    if (cichk_oop->is_loader_alive(is_alive)) {
      return;
    }
  } else {
    Metadata* ic_oop = ic->cached_metadata();
    if (ic_oop != NULL) {
      if (ic_oop->is_klass()) {
        if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
          return;
        }
      } else if (ic_oop->is_method()) {
        if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
          return;
        }
      } else {
        ShouldNotReachHere();
      }
    }
  }

  ic->set_to_clean();
}

unsigned char CompiledMethod::_global_unloading_clock = 0;

void CompiledMethod::increase_unloading_clock() {
  _global_unloading_clock++;
  if (_global_unloading_clock == 0) {
    // _nmethods are allocated with _unloading_clock == 0,
    // so 0 is never used as a clock value.
    _global_unloading_clock = 1;
  }
}

void CompiledMethod::set_unloading_clock(unsigned char unloading_clock) {
  OrderAccess::release_store(&_unloading_clock, unloading_clock);
}

unsigned char CompiledMethod::unloading_clock() {
  return OrderAccess::load_acquire(&_unloading_clock);
}
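// The clock protocol, roughly: each unloading cycle starts by bumping the
// global clock, and a worker that finishes processing an nmethod stamps it
// with the current global value. A sketch:
//
//   CompiledMethod::increase_unloading_clock();       // start of cycle
//   ...
//   if (nm->unloading_clock() != CompiledMethod::global_unloading_clock()) {
//     // nm has not been processed yet in this cycle
//   }
//   nm->set_unloading_clock(CompiledMethod::global_unloading_clock());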

// Processing of oop references should have been sufficient to keep
// all strong references alive.  Any weak references should have been
// cleared as well.  Visit all the metadata and ensure that it's
// really alive.
void CompiledMethod::verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive) {
#ifdef ASSERT
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    // static_stub_Relocations may have dangling references to
    // Method*s so trim them out here.  Otherwise it looks like
    // compiled code is maintaining a link to dead metadata.
    address static_call_addr = NULL;
    if (iter.type() == relocInfo::opt_virtual_call_type) {
      CompiledIC* cic = CompiledIC_at(&iter);
      if (!cic->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    } else if (iter.type() == relocInfo::static_call_type) {
      CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
      if (!csc->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    }
    if (static_call_addr != NULL) {
      RelocIterator sciter(this, low_boundary);
      while (sciter.next()) {
        if (sciter.type() == relocInfo::static_stub_type &&
            sciter.static_stub_reloc()->static_call() == static_call_addr) {
          sciter.static_stub_reloc()->clear_inline_cache();
        }
      }
    }
  }
  // Check that the metadata embedded in the nmethod is alive
  CheckClass::do_check_class(is_alive, this);
#endif
}

// This is called at the end of the strong tracing/marking phase of a
// GC to unload an nmethod if it contains otherwise unreachable
// oops.

void CompiledMethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  // Make sure the oops are ready to receive visitors
  assert(!is_zombie() && !is_unloaded(),
         "should not call follow on zombie or unloaded nmethod");

  // If the method is not entrant then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }

  // The RedefineClasses() API can cause the class unloading invariant
  // to no longer be true. See jvmtiExport.hpp for details.
  // Also, leave a debugging breadcrumb in local flag.
  if (JvmtiExport::has_redefined_a_class()) {
    // This set of the unloading_occurred flag is done before the
    // call to post_compiled_method_unload() so that the unloading
    // of this nmethod is reported.
    unloading_occurred = true;
  }

  // Exception cache
  clean_exception_cache(is_alive);

  // If class unloading occurred we first iterate over all inline caches and
  // clear ICs where the cached oop is referring to an unloaded klass or method.
  // The remaining live cached oops will be traversed in the relocInfo::oop_type
  // iteration below.
  if (unloading_occurred) {
    RelocIterator iter(this, low_boundary);
    while (iter.next()) {
      if (iter.type() == relocInfo::virtual_call_type) {
        CompiledIC *ic = CompiledIC_at(&iter);
        clean_ic_if_metadata_is_dead(ic, is_alive);
      }
    }
  }

  if (do_unloading_oops(low_boundary, is_alive, unloading_occurred)) {
    return;
  }

#if INCLUDE_JVMCI
  if (do_unloading_jvmci(is_alive, unloading_occurred)) {
    return;
  }
#endif

  // Ensure that all metadata is still alive
  verify_metadata_loaders(low_boundary, is_alive);
}

template <class CompiledICorStaticCall>
static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, BoolObjectClosure *is_alive, CompiledMethod* from) {
  // It is OK to look up references to zombies here
  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
  CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
  if (nm != NULL) {
    if (nm->unloading_clock() != CompiledMethod::global_unloading_clock()) {
      // The nmethod has not been processed yet.
      return true;
    }

    // Clean inline caches pointing to both zombie and not_entrant methods
    if (!nm->is_in_use() || (nm->method()->code() != nm)) {
      ic->set_to_clean();
      assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
    }
  }

  return false;
}
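// A true return value means the target nmethod has not yet been visited in
// this unloading cycle, so its cleaning is postponed; the caller records it
// and retries via do_unloading_parallel_postponed().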

static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, BoolObjectClosure *is_alive, CompiledMethod* from) {
  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), is_alive, from);
}

static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, BoolObjectClosure *is_alive, CompiledMethod* from) {
  return clean_if_nmethod_is_unloaded(csc, csc->destination(), is_alive, from);
}

bool CompiledMethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
  ResourceMark rm;

  // Make sure the oops are ready to receive visitors
  assert(!is_zombie() && !is_unloaded(),
         "should not call follow on zombie or unloaded nmethod");

  // If the method is not entrant then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }

  // The RedefineClasses() API can cause the class unloading invariant
  // to no longer be true. See jvmtiExport.hpp for details.
  // Also, leave a debugging breadcrumb in local flag.
  if (JvmtiExport::has_redefined_a_class()) {
    // This set of the unloading_occurred flag is done before the
    // call to post_compiled_method_unload() so that the unloading
    // of this nmethod is reported.
    unloading_occurred = true;
  }

  // Exception cache
  clean_exception_cache(is_alive);

  bool postponed = false;

  RelocIterator iter(this, low_boundary);
  while (iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first iterate over all inline caches and
        // clear ICs where the cached oop is referring to an unloaded klass or method.
        clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive);
      }

      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::opt_virtual_call_type:
      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::static_call_type:
      postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
      break;

    case relocInfo::oop_type:
      // handled by do_unloading_oops below
      break;

    case relocInfo::metadata_type:
      break; // nothing to do.

    default:
      break;
    }
  }

  if (do_unloading_oops(low_boundary, is_alive, unloading_occurred)) {
    return postponed;
  }

#if INCLUDE_JVMCI
  if (do_unloading_jvmci(is_alive, unloading_occurred)) {
    return postponed;
  }
#endif

  // Ensure that all metadata is still alive
  verify_metadata_loaders(low_boundary, is_alive);

  return postponed;
}
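// Illustrative driver for the parallel protocol above (a sketch; the real
// loop lives in the GC's code cache unloading task):
//
//   bool postponed = cm->do_unloading_parallel(is_alive, unloading_occurred);
//   // ... all workers finish the first pass ...
//   if (postponed) {
//     cm->do_unloading_parallel_postponed(is_alive, unloading_occurred);
//   }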

void CompiledMethod::do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred) {
  ResourceMark rm;

  // Make sure the oops are ready to receive visitors
  assert(!is_zombie(),
         "should not call follow on zombie nmethod");

  // If the method is not entrant then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }

  RelocIterator iter(this, low_boundary);
  while (iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::opt_virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::static_call_type:
      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
      break;

    default:
      break;
    }
  }
}