/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/compiledIC.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/exceptionHandlerTable.hpp"
#include "code/scopeDesc.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/gcBehaviours.hpp"
#include "interpreter/bytecode.inline.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/sharedRuntime.hpp"

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout,
                               int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps,
                               bool caller_must_gc_arguments)
  : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
    _mark_for_deoptimization_status(not_marked),
    _method(method),
    _gc_data(NULL)
{
  init_defaults();
}

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size,
                               int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size,
                               OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb,
             frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
    _mark_for_deoptimization_status(not_marked),
    _method(method),
    _gc_data(NULL)
{
  init_defaults();
}

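// Initialize the flag bits that are common to all CompiledMethod variants.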
void CompiledMethod::init_defaults() {
  _has_unsafe_access          = 0;
  _has_method_handle_invokes  = 0;
  _lazy_critical_native       = 0;
  _has_wide_vectors           = 0;
}

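// Returns true if return_pc is the return address of a method handle invoke
// call site in this method, as recorded in the PcDesc table.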
bool CompiledMethod::is_method_handle_return(address return_pc) {
  if (!has_method_handle_invokes())  return false;
  PcDesc* pd = pc_desc_at(return_pc);
  if (pd == NULL)
    return false;
  return pd->is_method_handle_invoke();
}

// Returns a string version of the method state.
const char* CompiledMethod::state() const {
  int state = get_state();
  switch (state) {
  case not_installed:
    return "not installed";
  case in_use:
    return "in use";
  case not_used:
    return "not_used";
  case not_entrant:
    return "not_entrant";
  case zombie:
    return "zombie";
  case unloaded:
    return "unloaded";
  default:
    fatal("unexpected method state: %d", state);
    return NULL;
  }
}

//-----------------------------------------------------------------------------
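// Marks this method for deoptimization, optionally requesting that recompile
// counts be updated. Takes CompiledMethod_lock unless the caller already holds it.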
void CompiledMethod::mark_for_deoptimization(bool inc_recompile_counts) {
  MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock,
                 Mutex::_no_safepoint_check_flag);
  _mark_for_deoptimization_status = (inc_recompile_counts ? deoptimize : deoptimize_noupdate);
}

//-----------------------------------------------------------------------------

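// Loads the head of the exception cache list with acquire semantics, so that
// a reader that observes a newly installed entry also observes its contents.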
ExceptionCache* CompiledMethod::exception_cache_acquire() const {
  return Atomic::load_acquire(&_exception_cache);
}

void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(), "Must hold the ExceptionCache_lock");
  assert(new_entry != NULL, "Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

  for (;;) {
    ExceptionCache *ec = exception_cache();
    if (ec != NULL) {
      Klass* ex_klass = ec->exception_type();
      if (!ex_klass->is_loader_alive()) {
        // We must guarantee that entries are not inserted with new next pointer
        // edges to ExceptionCache entries with dead klasses, due to bad interactions
        // with concurrent ExceptionCache cleanup. Therefore, the inserts roll
        // the head pointer forward to the first live ExceptionCache, so that the new
        // next pointers always point at live ExceptionCaches that are not removed by
        // concurrent ExceptionCache cleanup.
        ExceptionCache* next = ec->next();
        if (Atomic::cmpxchg(&_exception_cache, ec, next) == ec) {
          CodeCache::release_exception_cache(ec);
        }
        continue;
      }
      ec = exception_cache();
      if (ec != NULL) {
        new_entry->set_next(ec);
      }
    }
    if (Atomic::cmpxchg(&_exception_cache, ec, new_entry) == ec) {
      return;
    }
  }
}

void CompiledMethod::clean_exception_cache() {
  // For each nmethod, only a single thread may call this cleanup function
  // at the same time, whether called in STW cleanup or concurrent cleanup.
  // Note that if the GC is processing exception cache cleaning in a concurrent phase,
  // then a single writer may contend with cleaning up the head pointer to the
  // first ExceptionCache node that has a Klass* that is alive. That is fine,
  // as long as there is no concurrent cleanup of next pointers from concurrent writers.
  // And the concurrent writers do not clean up next pointers, only the head.
  // Also note that concurrent readers will walk through Klass* pointers that are not
  // alive. That does not cause ABA problems, because Klass* is deleted after
  // a handshake with all threads, after all stale ExceptionCaches have been
  // unlinked. That is also when the CodeCache::exception_cache_purge_list()
  // is deleted, with all ExceptionCache entries that were cleaned concurrently.
  // That similarly implies that CAS operations on ExceptionCache entries do not
  // suffer from ABA problems as unlinking and deletion is separated by a global
  // handshake operation.
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache_acquire();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    if (!curr->exception_type()->is_loader_alive()) {
      if (prev == NULL) {
        // Try to clean the head; this is contended by concurrent inserts that
        // both lazily clean the head and insert entries at the head. If
        // the CAS fails, the operation is restarted.
        if (Atomic::cmpxchg(&_exception_cache, curr, next) != curr) {
          prev = NULL;
          curr = exception_cache_acquire();
          continue;
        }
      } else {
        // During cleanup it is impossible to connect the next pointer to an
        // ExceptionCache that was not already published before a safepoint
        // preceding the cleanup. Therefore, a releasing store is not required.
        prev->set_next(next);
      }
      // prev stays the same.

      CodeCache::release_exception_cache(curr);
    } else {
      prev = curr;
    }

    curr = next;
  }
}

// These are the public methods for accessing the exception cache.
address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception, pc)) != NULL) {
      return ret_val;
    }
    ec = ec->next();
  }
  return NULL;
}

void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == NULL || !target_entry->add_address_and_handler(pc, handler)) {
    target_entry = new ExceptionCache(exception, pc, handler);
    add_exception_cache_entry(target_entry);
  }
}

// These methods are private and manipulate the exception cache directly.
ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}

//-------------end of code for ExceptionCache--------------

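// Relocation queries: scan the relocation entries covering pc and report
// whether they include a safepoint poll (return) relocation.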
bool CompiledMethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}


bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}

void CompiledMethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
  RelocIterator iter(this, NULL, NULL);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}


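// scope_desc_at requires an exact PcDesc match for pc; scope_desc_near also
// accepts a nearby PcDesc when there is no exact match. Both reconstruct the
// debug scope recorded for that program point.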
ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

ScopeDesc* CompiledMethod::scope_desc_near(address pc) {
  PcDesc* pd = pc_desc_near(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

address CompiledMethod::oops_reloc_begin() const {
  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  if (frame_complete_offset() != CodeOffsets::frame_never_safe &&
      code_begin() + frame_complete_offset() >
      verified_entry_point() + NativeJump::instruction_size)
  {
    // If we have a frame_complete_offset after the native jump, then there
    // is no point trying to look for oops before that. This is a requirement
    // for being allowed to scan oops concurrently.
    return code_begin() + frame_complete_offset();
  }

  // It is not safe to read oops concurrently using entry barriers if their
  // location depends on whether the nmethod is entrant or not.
  assert(BarrierSet::barrier_set()->barrier_set_nmethod() == NULL, "Not safe oop scan");

  address low_boundary = verified_entry_point();
  if (!is_in_use() && is_nmethod()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }
  return low_boundary;
}

int CompiledMethod::verify_icholder_relocations() {
  ResourceMark rm;
  int count = 0;

  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc(), this)) {
        CompiledIC *ic = CompiledIC_at(&iter);
        if (TraceCompiledIC) {
          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
          ic->print();
        }
        assert(ic->cached_icholder() != NULL, "must be non-NULL");
        count++;
      }
    }
  }

  return count;
}

// Method that knows how to preserve outgoing arguments at call. This method must be
// called with a frame corresponding to a Java invoke.
void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
  if (method() != NULL && !method()->is_native()) {
    address pc = fr.pc();
    SimpleScopeDesc ssd(this, pc);
    Bytecode_invoke call(methodHandle(Thread::current(), ssd.method()), ssd.bci());
    bool has_receiver = call.has_receiver();
    bool has_appendix = call.has_appendix();
    Symbol* signature = call.signature();

    // The method attached by JIT-compilers should be used, if present,
    // since the bytecode can be inaccurate in that case.
    Method* callee = attached_method_before_pc(pc);
    if (callee != NULL) {
      has_receiver = !(callee->access_flags().is_static());
      has_appendix = false;
      signature = callee->signature();
    }

    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
  }
}

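// Returns the Method* that the JIT attached to the call site at call_instr
// via the call relocation, or NULL if the call site has no attached method.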
Method* CompiledMethod::attached_method(address call_instr) {
  assert(code_contains(call_instr), "not part of the nmethod");
  RelocIterator iter(this, call_instr, call_instr + 1);
  while (iter.next()) {
    if (iter.addr() == call_instr) {
      switch (iter.type()) {
        case relocInfo::static_call_type:      return iter.static_call_reloc()->method_value();
        case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
        case relocInfo::virtual_call_type:     return iter.virtual_call_reloc()->method_value();
        default:                               break;
      }
    }
  }
  return NULL; // not found
}

Method* CompiledMethod::attached_method_before_pc(address pc) {
  if (NativeCall::is_call_before(pc)) {
    NativeCall* ncall = nativeCall_before(pc);
    return attached_method(ncall->instruction_address());
  }
  return NULL; // not a call
}

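// Resets every inline cache in this method to its initial state. Only
// allowed at a safepoint; zombie methods are skipped.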
void CompiledMethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of ICs only allowed at safepoint");
  if (is_zombie()) {
    return;
  }

  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}

// Clear IC callsites, releasing ICStubs of all compiled ICs
// as well as any associated CompiledICHolders.
void CompiledMethod::clear_ic_callsites() {
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");
  ResourceMark rm;
  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(&iter);
      ic->set_to_clean(false);
    }
  }
}

#ifdef ASSERT
// Check that the class loader is alive for this bit of metadata.
class CheckClass : public MetadataClosure {
  void do_metadata(Metadata* md) {
    Klass* klass = NULL;
    if (md->is_klass()) {
      klass = ((Klass*)md);
    } else if (md->is_method()) {
      klass = ((Method*)md)->method_holder();
    } else if (md->is_methodData()) {
      klass = ((MethodData*)md)->method()->method_holder();
    } else {
      md->print();
      ShouldNotReachHere();
    }
    assert(klass->is_loader_alive(), "must be alive");
  }
};
#endif // ASSERT


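// Cleans an inline cache whose cached metadata belongs to an unloaded class.
// Returns false if the IC transition failed (e.g. no ICStub space was
// available) and the cleaning pass must be retried.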
bool CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {
  if (ic->is_clean()) {
    return true;
  }
  if (ic->is_icholder_call()) {
    // CompiledICHolder metadata is the exception: it may still be alive,
    // which we check here via the holder's class loader.
    CompiledICHolder* cichk_metadata = ic->cached_icholder();

    if (cichk_metadata->is_loader_alive()) {
      return true;
    }
  } else {
    Metadata* ic_metadata = ic->cached_metadata();
    if (ic_metadata != NULL) {
      if (ic_metadata->is_klass()) {
        if (((Klass*)ic_metadata)->is_loader_alive()) {
          return true;
        }
      } else if (ic_metadata->is_method()) {
        Method* method = (Method*)ic_metadata;
        assert(!method->is_old(), "old method should have been cleaned");
        if (method->method_holder()->is_loader_alive()) {
          return true;
        }
      } else {
        ShouldNotReachHere();
      }
    }
  }

  return ic->set_to_clean();
}

// Clean references to unloaded nmethods at addr from this one, which is not unloaded.
template <class CompiledICorStaticCall>
static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from,
                                         bool clean_all) {
  // It is OK to look up references to zombies here.
  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
  CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
  if (nm != NULL) {
    // Clean inline caches pointing to both zombie and not_entrant methods
    if (clean_all || !nm->is_in_use() || nm->is_unloading() || (nm->method()->code() != nm)) {
      if (!ic->set_to_clean(from->is_alive())) {
        return false;
      }
      assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
    }
  }
  return true;
}

static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from,
                                         bool clean_all) {
  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from, clean_all);
}

static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from,
                                         bool clean_all) {
  return clean_if_nmethod_is_unloaded(csc, csc->destination(), from, clean_all);
}

// Cleans caches in nmethods that point to either classes that are unloaded
// or nmethods that are unloaded.
//
// Can be called in parallel (e.g. during G1 class unloading) or after all
// nmethods have been unloaded. Returns false if a cleaning pass could not
// complete and must be retried (see cleanup_inline_caches_impl).
bool CompiledMethod::unload_nmethod_caches(bool unloading_occurred) {
  ResourceMark rm;

  // The exception cache only needs cleaning if class unloading occurred.
  if (unloading_occurred) {
    clean_exception_cache();
  }

  if (!cleanup_inline_caches_impl(unloading_occurred, false)) {
    return false;
  }

#ifdef ASSERT
  // Check that the metadata embedded in the nmethod is alive
  CheckClass check_class;
  metadata_do(&check_class);
#endif
  return true;
}

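// Retry loop: a cleaning pass can fail when ICStubs run out; refill the
// IC stub buffer and retry until the pass succeeds.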
void CompiledMethod::cleanup_inline_caches(bool clean_all) {
  for (;;) {
    ICRefillVerifier ic_refill_verifier;
    { CompiledICLocker ic_locker(this);
      if (cleanup_inline_caches_impl(false, clean_all)) {
        return;
      }
    }
    InlineCacheBuffer::refill_ic_stubs();
  }
}

// Called to clean up after class unloading for live nmethods and from the sweeper
// for all methods.
bool CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) {
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");
  ResourceMark rm;

  // Find all calls in an nmethod and clear the ones that point to non-entrant,
  // zombie and unloaded nmethods.
  RelocIterator iter(this, oops_reloc_begin());
  bool is_in_static_stub = false;
  while (iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first clear ICs where the cached metadata
        // is referring to an unloaded klass or method.
        if (!clean_ic_if_metadata_is_dead(CompiledIC_at(&iter))) {
          return false;
        }
      }

      if (!clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all)) {
        return false;
      }
      break;

    case relocInfo::opt_virtual_call_type:
      if (!clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all)) {
        return false;
      }
      break;

    case relocInfo::static_call_type:
      if (!clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, clean_all)) {
        return false;
      }
      break;

    case relocInfo::static_stub_type: {
      is_in_static_stub = true;
      break;
    }

    case relocInfo::metadata_type: {
      // Only the metadata relocations contained in static/opt virtual call stubs
      // contain the Method* passed to c2i adapters. It is the only metadata
      // relocation that needs to be walked, as it is the one metadata relocation
      // that violates the invariant that all metadata relocations have an oop
      // in the compiled method (due to deferred resolution and code patching).

      // This causes dead metadata to remain in compiled methods that are not
      // unloading. Unless these slippery metadata relocations of the static
      // stubs are at least cleared, subsequent class redefinition operations
      // will access potentially free memory, and JavaThread execution
      // concurrent to class unloading may call c2i adapters with dead methods.
      if (!is_in_static_stub) {
        // The first metadata relocation after a static stub relocation is the
        // metadata relocation of the static stub used to pass the Method* to
        // c2i adapters.
        continue;
      }
      is_in_static_stub = false;
      metadata_Relocation* r = iter.metadata_reloc();
      Metadata* md = r->metadata_value();
      if (md != NULL && md->is_method()) {
        Method* method = static_cast<Method*>(md);
        if (!method->method_holder()->is_loader_alive()) {
          Atomic::store(r->metadata_addr(), (Method*)NULL);

          if (!r->metadata_is_immediate()) {
            r->fix_metadata_relocation();
          }
        }
      }
      break;
    }

    default:
      break;
    }
  }

  return true;
}

// Iterating over all nmethods, e.g. with the help of CodeCache::nmethods_do(fun), was found
// to not be inherently safe. There is a chance that fields are seen which are not properly
// initialized. This happens despite the fact that nmethods_do() asserts the CodeCache_lock
// to be held.
// To bundle knowledge about the necessary checks in one place, this function was introduced.
// It is not claimed that these checks are sufficient, but they were found to be necessary.
bool CompiledMethod::nmethod_access_is_safe(nmethod* nm) {
  Method* method = (nm == NULL) ? NULL : nm->method();  // nm->method() may be uninitialized, i.e. != NULL, but invalid
  return (nm != NULL) && (method != NULL) && (method->signature() != NULL) &&
         !nm->is_zombie() && !nm->is_not_installed() &&
         os::is_readable_pointer(method) &&
         os::is_readable_pointer(method->constants()) &&
         os::is_readable_pointer(method->signature());
}

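// Maps the pc of an implicit exception (null check or divide-by-zero) to the
// continuation address recorded for it in this method's ImplicitExceptionTable.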
address CompiledMethod::continuation_for_implicit_exception(address pc, bool for_div0_check) {
  // Exception happened outside inline-cache check code => we are inside
  // an active nmethod => use the implicit exception table to determine a
  // return address.
  int exception_offset = pc - code_begin();
  int cont_offset = ImplicitExceptionTable(this).continuation_offset(exception_offset);
#ifdef ASSERT
  if (cont_offset == 0) {
    Thread* thread = Thread::current();
    ResetNoHandleMark rnm; // Might be called from LEAF/QUICK ENTRY
    HandleMark hm(thread);
    ResourceMark rm(thread);
    CodeBlob* cb = CodeCache::find_blob(pc);
    assert(cb != NULL && cb == this, "");
    ttyLocker ttyl;
    tty->print_cr("implicit exception happened at " INTPTR_FORMAT, p2i(pc));
    print();
    method()->print_codes();
    print_code();
    print_pcs();
  }
#endif
  if (cont_offset == 0) {
    // Let the normal error handling report the exception
    return NULL;
  }
  if (cont_offset == exception_offset) {
#if INCLUDE_JVMCI
    Deoptimization::DeoptReason deopt_reason = for_div0_check ? Deoptimization::Reason_div0_check : Deoptimization::Reason_null_check;
    JavaThread *thread = JavaThread::current();
    thread->set_jvmci_implicit_exception_pc(pc);
    thread->set_pending_deoptimization(Deoptimization::make_trap_request(deopt_reason,
                                                                         Deoptimization::Action_reinterpret));
    return (SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
#else
    ShouldNotReachHere();
#endif
  }
  return code_begin() + cont_offset;
}

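// MetadataClosure that reports whether any metadata in the nmethod refers to
// an old (redefined) method.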
class HasEvolDependency : public MetadataClosure {
  bool _has_evol_dependency;
 public:
  HasEvolDependency() : _has_evol_dependency(false) {}
  void do_metadata(Metadata* md) {
    if (md->is_method()) {
      Method* method = (Method*)md;
      if (method->is_old()) {
        _has_evol_dependency = true;
      }
    }
  }
  bool has_evol_dependency() const { return _has_evol_dependency; }
};

bool CompiledMethod::has_evol_metadata() {
  // Check the metadata found via relocations and CompiledICs; callers use
  // this to deoptimize any nmethod that has a reference to old methods.
  HasEvolDependency check_evol;
  metadata_do(&check_evol);
  if (check_evol.has_evol_dependency() && log_is_enabled(Debug, redefine, class, nmethod)) {
    ResourceMark rm;
    log_debug(redefine, class, nmethod)
            ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d in nmethod metadata",
             _method->method_holder()->external_name(),
             _method->name()->as_C_string(),
             _method->signature()->as_C_string(),
             compile_id());
  }
  return check_evol.has_evol_dependency();
}