/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/scopeDesc.hpp"
#include "interpreter/bytecode.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
  _method(method), _mark_for_deoptimization_status(not_marked) {
  init_defaults();
}

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
  _method(method), _mark_for_deoptimization_status(not_marked) {
  init_defaults();
}

void CompiledMethod::init_defaults() {
  _has_unsafe_access          = 0;
  _has_method_handle_invokes  = 0;
  _lazy_critical_native       = 0;
  _has_wide_vectors           = 0;
  _unloading_clock            = 0;
}

bool CompiledMethod::is_method_handle_return(address return_pc) {
  if (!has_method_handle_invokes())  return false;
  PcDesc* pd = pc_desc_at(return_pc);
  if (pd == NULL)
    return false;
  return pd->is_method_handle_invoke();
}

// Returns a string version of the method state.
const char* CompiledMethod::state() const {
  int state = get_state();
  switch (state) {
  case not_installed:
    return "not installed";
  case in_use:
    return "in use";
  case not_used:
    return "not_used";
  case not_entrant:
    return "not_entrant";
  case zombie:
    return "zombie";
  case unloaded:
    return "unloaded";
  default:
    fatal("unexpected method state: %d", state);
    return NULL;
  }
}

  86 
  87 //-----------------------------------------------------------------------------
  88 
  89 void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  90   assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
  91   assert(new_entry != NULL,"Must be non null");
  92   assert(new_entry->next() == NULL, "Must be null");
  93 
  94   ExceptionCache *ec = exception_cache();
  95   if (ec != NULL) {
  96     new_entry->set_next(ec);
  97   }
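  // Publish with a release store: readers traverse the cache without taking
  // the ExceptionCache_lock, so the new entry's fields (including its next
  // pointer) must be visible before the new list head is.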
  release_set_exception_cache(new_entry);
}

void CompiledMethod::clean_exception_cache(BoolObjectClosure* is_alive) {
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    Klass* ex_klass = curr->exception_type();
    if (ex_klass != NULL && !ex_klass->is_loader_alive(is_alive)) {
      if (prev == NULL) {
        set_exception_cache(next);
      } else {
        prev->set_next(next);
      }
      delete curr;
      // prev stays the same.
    } else {
      prev = curr;
    }

    curr = next;
  }
}

// Public access methods for the exception cache.
address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception, pc)) != NULL) {
      return ret_val;
    }
    ec = ec->next();
  }
  return NULL;
}

void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == NULL || !target_entry->add_address_and_handler(pc, handler)) {
    target_entry = new ExceptionCache(exception, pc, handler);
    add_exception_cache_entry(target_entry);
  }
}
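
// Typical use, as a sketch (the exact runtime call sites differ): exception
// dispatch consults the cache first and falls back to a full handler search
// on a miss, caching the result for later throws at the same pc:
//   address handler = cm->handler_for_exception_and_pc(exception, ret_pc);
//   if (handler == NULL) {
//     handler = /* full handler table search */;
//     cm->add_handler_for_exception_and_pc(exception, ret_pc, handler);
//   }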

//-------------end of code for ExceptionCache--------------

// Private method used to manipulate the exception cache directly.
ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}

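// The two queries below test whether pc sits on a safepoint poll; the first
// matches only polls at return sites.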
bool CompiledMethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}


bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}

void CompiledMethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values.
  RelocIterator iter(this, NULL, NULL);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}

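// A ScopeDesc describes the Java-level state recorded in the debug info at a
// given pc: the method, bci and, for inlined code, the chain of caller scopes.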
ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

ScopeDesc* CompiledMethod::scope_desc_near(address pc) {
  PcDesc* pd = pc_desc_near(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

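// Clean inline caches and static calls whose destination nmethod is dead
// (not entrant, zombie, unloaded, or superseded by a newer compilation of
// the same method); with clean_all, clean every one that points into
// compiled code.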
void CompiledMethod::cleanup_inline_caches(bool clean_all/*=false*/) {
  assert_locked_or_safepoint(CompiledIC_lock);

  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (!is_in_use() && is_nmethod()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }

  // Find all calls in an nmethod and clear the ones that point to non-entrant,
  // zombie and unloaded nmethods.
  ResourceMark rm;
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    switch (iter.type()) {
      case relocInfo::virtual_call_type:
      case relocInfo::opt_virtual_call_type: {
        CompiledIC *ic = CompiledIC_at(&iter);
        // It is OK to look up references to zombies here.
        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
        if (cb != NULL && cb->is_compiled()) {
          CompiledMethod* nm = cb->as_compiled_method();
          // Clean inline caches pointing to zombie, non-entrant and unloaded methods
          if (clean_all || !nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean(is_alive());
        }
        break;
      }
      case relocInfo::static_call_type: {
        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
        if (cb != NULL && cb->is_compiled()) {
          CompiledMethod* cm = cb->as_compiled_method();
          // Clean inline caches pointing to zombie, non-entrant and unloaded methods
          if (clean_all || !cm->is_in_use() || (cm->method()->code() != cm)) {
            csc->set_to_clean();
          }
        }
        break;
      }
      default:
        break;
    }
  }
}

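// Returns the number of inline caches in this nmethod that currently go
// through a CompiledICHolder; with TraceCompiledIC, each one is also printed.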
int CompiledMethod::verify_icholder_relocations() {
  ResourceMark rm;
  int count = 0;

  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc(), this)) {
        CompiledIC *ic = CompiledIC_at(&iter);
        if (TraceCompiledIC) {
          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
          ic->print();
        }
        assert(ic->cached_icholder() != NULL, "must be non-NULL");
        count++;
      }
    }
  }

  return count;
}

// Preserves the outgoing arguments at a call site. Must be called with a
// frame corresponding to a Java invoke.
void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
  if (method() != NULL && !method()->is_native()) {
    address pc = fr.pc();
    SimpleScopeDesc ssd(this, pc);
    Bytecode_invoke call(ssd.method(), ssd.bci());
    bool has_receiver = call.has_receiver();
    bool has_appendix = call.has_appendix();
    Symbol* signature = call.signature();

    // If the JIT compiler attached a resolved method to the call site,
    // prefer it: the bytecode may not describe the actual callee accurately.
    Method* callee = attached_method_before_pc(pc);
    if (callee != NULL) {
      has_receiver = !(callee->access_flags().is_static());
      has_appendix = false;
      signature = callee->signature();
    }

    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
  }
}

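// JIT compilers may attach the resolved callee Method* to a call site's
// relocation; the two accessors below retrieve it.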
Method* CompiledMethod::attached_method(address call_instr) {
  assert(code_contains(call_instr), "not part of the nmethod");
  RelocIterator iter(this, call_instr, call_instr + 1);
  while (iter.next()) {
    if (iter.addr() == call_instr) {
      switch (iter.type()) {
        case relocInfo::static_call_type:      return iter.static_call_reloc()->method_value();
        case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
        case relocInfo::virtual_call_type:     return iter.virtual_call_reloc()->method_value();
        default:                               break;
      }
    }
  }
  return NULL; // not found
}

Method* CompiledMethod::attached_method_before_pc(address pc) {
  if (NativeCall::is_call_before(pc)) {
    NativeCall* ncall = nativeCall_before(pc);
    return attached_method(ncall->instruction_address());
  }
  return NULL; // not a call
}

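// Reset all of this nmethod's inline caches to the clean state. Must run at
// a safepoint: inline caches may otherwise be patched concurrently.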
void CompiledMethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of ICs only allowed at safepoint");
  if (is_zombie()) {
    return;
  }

  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}

// Clear ICStubs of all compiled ICs
void CompiledMethod::clear_ic_stubs() {
  assert_locked_or_safepoint(CompiledIC_lock);
  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(&iter);
      ic->clear_ic_stub();
    }
  }
}

#ifdef ASSERT

class CheckClass : AllStatic {
  static BoolObjectClosure* _is_alive;

  // Check that the class loader is alive for this bit of metadata.
  static void check_class(Metadata* md) {
    Klass* klass = NULL;
    if (md->is_klass()) {
      klass = ((Klass*)md);
    } else if (md->is_method()) {
      klass = ((Method*)md)->method_holder();
    } else if (md->is_methodData()) {
      klass = ((MethodData*)md)->method()->method_holder();
    } else {
      md->print();
      ShouldNotReachHere();
    }
    assert(klass->is_loader_alive(_is_alive), "must be alive");
  }
 public:
  static void do_check_class(BoolObjectClosure* is_alive, CompiledMethod* nm) {
    assert(SafepointSynchronize::is_at_safepoint(), "this is only ok at safepoint");
    _is_alive = is_alive;
    nm->metadata_do(check_class);
  }
};

// This is called during a safepoint so can use static data
BoolObjectClosure* CheckClass::_is_alive = NULL;
#endif // ASSERT

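// Clean an inline cache if the metadata it caches (a CompiledICHolder, a
// Klass, or a Method) belongs to a class loader that is no longer alive.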
void CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive) {
  if (ic->is_icholder_call()) {
    // The cached metadata is a CompiledICHolder; keep the IC only if the
    // holder is still alive (checked just below), otherwise fall through
    // and clean it.
    CompiledICHolder* cichk_oop = ic->cached_icholder();

    if (cichk_oop->is_loader_alive(is_alive)) {
      return;
    }
  } else {
    Metadata* ic_oop = ic->cached_metadata();
    if (ic_oop != NULL) {
      if (ic_oop->is_klass()) {
        if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
          return;
        }
      } else if (ic_oop->is_method()) {
        if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
          return;
        }
      } else {
        ShouldNotReachHere();
      }
    }
  }

  ic->set_to_clean();
}

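// Unloading clock protocol: the global clock is bumped once per code cache
// unloading pass; an nmethod whose local clock differs from the global value
// has not yet been processed in the current pass. Zero is skipped because
// freshly allocated nmethods start with _unloading_clock == 0.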
unsigned char CompiledMethod::_global_unloading_clock = 0;

void CompiledMethod::increase_unloading_clock() {
  _global_unloading_clock++;
  if (_global_unloading_clock == 0) {
    // _nmethods are allocated with _unloading_clock == 0,
    // so 0 is never used as a clock value.
    _global_unloading_clock = 1;
  }
}

void CompiledMethod::set_unloading_clock(unsigned char unloading_clock) {
  OrderAccess::release_store(&_unloading_clock, unloading_clock);
}

unsigned char CompiledMethod::unloading_clock() {
  return OrderAccess::load_acquire(&_unloading_clock);
}

// Processing of oop references should have been sufficient to keep
// all strong references alive.  Any weak references should have been
// cleared as well.  Visit all the metadata and ensure that it's
// really alive.
void CompiledMethod::verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive) {
#ifdef ASSERT
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    // static_stub_Relocations may have dangling references to
    // Method*s so trim them out here.  Otherwise it looks like
    // compiled code is maintaining a link to dead metadata.
    address static_call_addr = NULL;
    if (iter.type() == relocInfo::opt_virtual_call_type) {
      CompiledIC* cic = CompiledIC_at(&iter);
      if (!cic->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    } else if (iter.type() == relocInfo::static_call_type) {
      CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
      if (!csc->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    }
    if (static_call_addr != NULL) {
      RelocIterator sciter(this, low_boundary);
      while (sciter.next()) {
        if (sciter.type() == relocInfo::static_stub_type &&
            sciter.static_stub_reloc()->static_call() == static_call_addr) {
          sciter.static_stub_reloc()->clear_inline_cache();
        }
      }
    }
  }
  // Check that the metadata embedded in the nmethod is alive
  CheckClass::do_check_class(is_alive, this);
#endif
}

// This is called at the end of the strong tracing/marking phase of a
// GC to unload an nmethod if it contains otherwise unreachable
// oops.

void CompiledMethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  // Make sure the oop's ready to receive visitors
  assert(!is_zombie() && !is_unloaded(),
         "should not call follow on zombie or unloaded nmethod");

  // If the method is not entrant then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }

  // The RedefineClasses() API can cause the class unloading invariant
  // to no longer be true. See jvmtiExport.hpp for details.
  // Also, leave a debugging breadcrumb in local flag.
  if (JvmtiExport::has_redefined_a_class()) {
    // This set of the unloading_occurred flag is done before the
    // call to post_compiled_method_unload() so that the unloading
    // of this nmethod is reported.
    unloading_occurred = true;
  }

  // Exception cache
  clean_exception_cache(is_alive);

  // If class unloading occurred we first iterate over all inline caches and
  // clear ICs where the cached oop is referring to an unloaded klass or method.
  // The remaining live cached oops will be traversed in the relocInfo::oop_type
  // iteration below.
  if (unloading_occurred) {
    RelocIterator iter(this, low_boundary);
    while (iter.next()) {
      if (iter.type() == relocInfo::virtual_call_type) {
        CompiledIC *ic = CompiledIC_at(&iter);
        clean_ic_if_metadata_is_dead(ic, is_alive);
      }
    }
  }

  if (do_unloading_oops(low_boundary, is_alive, unloading_occurred)) {
    return;
  }

#if INCLUDE_JVMCI
  if (do_unloading_jvmci(is_alive, unloading_occurred)) {
    return;
  }
#endif

  // Ensure that all metadata is still alive
  verify_metadata_loaders(low_boundary, is_alive);
}

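// Clean 'ic' when 'addr' points into a CompiledMethod that is dead (no
// longer in use, or superseded by a newer compilation). Returns true if the
// target has not yet been processed in this unloading pass, meaning the
// cleaning must be postponed.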
template <class CompiledICorStaticCall>
static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, BoolObjectClosure *is_alive, CompiledMethod* from) {
  // It is OK to look up references to zombies here.
  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
  CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
  if (nm != NULL) {
    if (nm->unloading_clock() != CompiledMethod::global_unloading_clock()) {
      // The nmethod has not been processed yet.
      return true;
    }

    // Clean inline caches pointing to both zombie and not_entrant methods
    if (!nm->is_in_use() || (nm->method()->code() != nm)) {
      ic->set_to_clean();
      assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
    }
  }

  return false;
}

static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, BoolObjectClosure *is_alive, CompiledMethod* from) {
  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), is_alive, from);
}

static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, BoolObjectClosure *is_alive, CompiledMethod* from) {
  return clean_if_nmethod_is_unloaded(csc, csc->destination(), is_alive, from);
}

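// First pass of parallel nmethod unloading: clean the exception cache, the
// inline caches and the static calls. Returns true if any cleaning had to be
// postponed because a call target had not yet been processed in this pass;
// those sites are revisited in do_unloading_parallel_postponed().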
bool CompiledMethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
  ResourceMark rm;

  // Make sure the oop's ready to receive visitors
  assert(!is_zombie() && !is_unloaded(),
         "should not call follow on zombie or unloaded nmethod");

  // If the method is not entrant then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }

  // The RedefineClasses() API can cause the class unloading invariant
  // to no longer be true. See jvmtiExport.hpp for details.
  // Also, leave a debugging breadcrumb in local flag.
  if (JvmtiExport::has_redefined_a_class()) {
    // This set of the unloading_occurred flag is done before the
    // call to post_compiled_method_unload() so that the unloading
    // of this nmethod is reported.
    unloading_occurred = true;
  }

  // Exception cache
  clean_exception_cache(is_alive);

  bool postponed = false;

  RelocIterator iter(this, low_boundary);
  while (iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first iterate over all inline caches and
        // clear ICs where the cached oop is referring to an unloaded klass or method.
        clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive);
      }

      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::opt_virtual_call_type:
      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::static_call_type:
      postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
      break;

    case relocInfo::oop_type:
      // handled by do_unloading_oops below
      break;

    case relocInfo::metadata_type:
      break; // nothing to do.

    default:
      break;
    }
  }

  if (do_unloading_oops(low_boundary, is_alive, unloading_occurred)) {
    return postponed;
  }

#if INCLUDE_JVMCI
  if (do_unloading_jvmci(is_alive, unloading_occurred)) {
    return postponed;
  }
#endif

  // Ensure that all metadata is still alive
  verify_metadata_loaders(low_boundary, is_alive);

  return postponed;
}

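// Second pass of parallel nmethod unloading: revisit the call sites whose
// cleaning the first pass postponed. By now all nmethods have been processed,
// so no further postponement is needed.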
void CompiledMethod::do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred) {
  ResourceMark rm;

  // Make sure the oop's ready to receive visitors
  assert(!is_zombie(),
         "should not call follow on zombie nmethod");

  // If the method is not entrant then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }

  RelocIterator iter(this, low_boundary);
  while (iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::opt_virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::static_call_type:
      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
      break;

    default:
      break;
    }
  }
}