/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/compiledIC.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/scopeDesc.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/gcBehaviours.hpp"
#include "interpreter/bytecode.inline.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/sharedRuntime.hpp"

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout,
                               int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps,
                               bool caller_must_gc_arguments)
  : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
    _mark_for_deoptimization_status(not_marked),
    _method(method),
    _gc_data(NULL)
{
  init_defaults();
}

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size,
                               int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size,
                               OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb,
             frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
    _mark_for_deoptimization_status(not_marked),
    _method(method),
    _gc_data(NULL)
{
  init_defaults();
}

void CompiledMethod::init_defaults() {
  _has_unsafe_access          = 0;
  _has_method_handle_invokes  = 0;
  _lazy_critical_native       = 0;
  _has_wide_vectors           = 0;
}

bool CompiledMethod::is_method_handle_return(address return_pc) {
  if (!has_method_handle_invokes())  return false;
  PcDesc* pd = pc_desc_at(return_pc);
  if (pd == NULL)
    return false;
  return pd->is_method_handle_invoke();
}

// Returns a string version of the method state.
const char* CompiledMethod::state() const {
  int state = get_state();
  switch (state) {
  case not_installed:
    return "not installed";
  case in_use:
    return "in use";
  case not_used:
    return "not_used";
  case not_entrant:
    return "not_entrant";
  case zombie:
    return "zombie";
  case unloaded:
    return "unloaded";
  default:
    fatal("unexpected method state: %d", state);
    return NULL;
  }
}

//-----------------------------------------------------------------------------

ExceptionCache* CompiledMethod::exception_cache_acquire() const {
  return OrderAccess::load_acquire(&_exception_cache);
}

void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(), "Must hold the ExceptionCache_lock");
  assert(new_entry != NULL, "Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

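  // We hold the ExceptionCache_lock, so we are the only inserter. However,
  // concurrent cleanup may unlink stale entries from the head at any time,
  // so the head CAS below can still fail and the insert must be retried.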
  for (;;) {
    ExceptionCache *ec = exception_cache();
    if (ec != NULL) {
      Klass* ex_klass = ec->exception_type();
      if (!ex_klass->is_loader_alive()) {
        // We must guarantee that entries are not inserted with new next pointer
        // edges to ExceptionCache entries with dead klasses, due to bad interactions
        // with concurrent ExceptionCache cleanup. Therefore, the inserts roll
        // the head pointer forward to the first live ExceptionCache, so that the new
        // next pointers always point at live ExceptionCaches that are not removed due
        // to concurrent ExceptionCache cleanup.
        ExceptionCache* next = ec->next();
        if (Atomic::cmpxchg(next, &_exception_cache, ec) == ec) {
          CodeCache::release_exception_cache(ec);
        }
        continue;
      }
      ec = exception_cache();
      if (ec != NULL) {
        new_entry->set_next(ec);
      }
    }
    if (Atomic::cmpxchg(new_entry, &_exception_cache, ec) == ec) {
      return;
    }
  }
}

void CompiledMethod::clean_exception_cache() {
  // For each nmethod, only a single thread may call this cleanup function
  // at a time, whether called in STW cleanup or concurrent cleanup.
  // Note that if the GC is processing exception cache cleaning in a concurrent phase,
  // then a single writer may contend with cleaning up the head pointer to the
  // first ExceptionCache node that has a Klass* that is alive. That is fine,
  // as long as there is no concurrent cleanup of next pointers from concurrent writers.
  // And the concurrent writers do not clean up next pointers, only the head.
  // Also note that concurrent readers will walk through Klass* pointers that are not
  // alive. That does not cause ABA problems, because Klass* is deleted after
  // a handshake with all threads, after all stale ExceptionCaches have been
  // unlinked. That is also when the CodeCache::exception_cache_purge_list()
  // is deleted, with all ExceptionCache entries that were cleaned concurrently.
  // That similarly implies that CAS operations on ExceptionCache entries do not
  // suffer from ABA problems as unlinking and deletion is separated by a global
  // handshake operation.
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache_acquire();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    if (!curr->exception_type()->is_loader_alive()) {
      if (prev == NULL) {
        // Try to clean head; this is contended by concurrent inserts, that
        // both lazily clean the head, and insert entries at the head. If
        // the CAS fails, the operation is restarted.
        if (Atomic::cmpxchg(next, &_exception_cache, curr) != curr) {
          prev = NULL;
          curr = exception_cache_acquire();
          continue;
        }
      } else {
        // It is impossible during cleanup to connect the next pointer to
        // an ExceptionCache that has not been published before a safepoint
        // prior to the cleanup. Therefore, release is not required.
        prev->set_next(next);
      }
      // prev stays the same.

      CodeCache::release_exception_cache(curr);
    } else {
      prev = curr;
    }

    curr = next;
  }
}

// These are the public access methods for the exception cache.
address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception, pc)) != NULL) {
      return ret_val;
    }
    ec = ec->next();
  }
  return NULL;
}

void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == NULL || !target_entry->add_address_and_handler(pc, handler)) {
    target_entry = new ExceptionCache(exception, pc, handler);
    add_exception_cache_entry(target_entry);
  }
}

// These methods are private, and used to manipulate the exception cache
// directly.
ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}

//-------------end of code for ExceptionCache--------------

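// The two queries below construct a RelocIterator over the half-open range
// [pc, pc+1), so only relocations attached to exactly this pc are visited.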
bool CompiledMethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}


bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}

void CompiledMethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
  RelocIterator iter(this, NULL, NULL);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}


ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop(), pd->return_vt());
}

ScopeDesc* CompiledMethod::scope_desc_near(address pc) {
  PcDesc* pd = pc_desc_near(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop(), pd->return_vt());
}

address CompiledMethod::oops_reloc_begin() const {
  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  if (frame_complete_offset() != CodeOffsets::frame_never_safe &&
      code_begin() + frame_complete_offset() >
      verified_entry_point() + NativeJump::instruction_size)
  {
    // If we have a frame_complete_offset after the native jump, then there
    // is no point trying to look for oops before that. This is a requirement
    // for being allowed to scan oops concurrently.
    return code_begin() + frame_complete_offset();
  }

  // It is not safe to read oops concurrently using entry barriers, if their
  // location depends on whether the nmethod is entrant or not.
  assert(BarrierSet::barrier_set()->barrier_set_nmethod() == NULL, "Not safe oop scan");

  address low_boundary = verified_entry_point();
  if (!is_in_use() && is_nmethod()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }
  return low_boundary;
}

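// Counts (and, with TraceCompiledIC, prints) the virtual call sites in this
// method that currently go through a CompiledICHolder, asserting that each
// cached icholder is non-NULL.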
int CompiledMethod::verify_icholder_relocations() {
  ResourceMark rm;
  int count = 0;

  RelocIterator iter(this);
  while(iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc(), this)) {
        CompiledIC *ic = CompiledIC_at(&iter);
        if (TraceCompiledIC) {
          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
          ic->print();
        }
        assert(ic->cached_icholder() != NULL, "must be non-NULL");
        count++;
      }
    }
  }

  return count;
}

// Method that knows how to preserve outgoing arguments at a call. This method must
// be called with a frame corresponding to a Java invoke.
void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
  if (method() != NULL && !method()->is_native()) {
    address pc = fr.pc();
    // The method attached by JIT-compilers should be used, if present.
    // The bytecode can be inaccurate in such a case.
    Method* callee = attached_method_before_pc(pc);
    bool has_receiver = false;
    bool has_appendix = false;
    Symbol* signature = NULL;
    if (callee != NULL) {
      has_receiver = !(callee->access_flags().is_static());
      has_appendix = false;
      signature = callee->signature();

      // If value types are passed as fields, use the extended signature
      // which contains the types of all (oop) fields of the value type.
      if (callee->has_scalarized_args()) {
        const GrowableArray<SigEntry>* sig = callee->adapter()->get_sig_cc();
        signature = SigEntry::create_symbol(sig);
        has_receiver = false; // The extended signature contains the receiver type
      }
    } else {
      SimpleScopeDesc ssd(this, pc);
      Bytecode_invoke call(ssd.method(), ssd.bci());
      has_receiver = call.has_receiver();
      has_appendix = call.has_appendix();
      signature = call.signature();
    }

    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
  }
}

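// Returns the Method* that the compiler attached to the call instruction at
// call_instr, found by scanning the relocations at exactly that address, or
// NULL if no call relocation with an attached method is present there.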
Method* CompiledMethod::attached_method(address call_instr) {
  assert(code_contains(call_instr), "not part of the nmethod");
  RelocIterator iter(this, call_instr, call_instr + 1);
  while (iter.next()) {
    if (iter.addr() == call_instr) {
      switch(iter.type()) {
        case relocInfo::static_call_type:      return iter.static_call_reloc()->method_value();
        case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
        case relocInfo::virtual_call_type:     return iter.virtual_call_reloc()->method_value();
        default:                               break;
      }
    }
  }
  return NULL; // not found
}

Method* CompiledMethod::attached_method_before_pc(address pc) {
  if (NativeCall::is_call_before(pc)) {
    NativeCall* ncall = nativeCall_before(pc);
    return attached_method(ncall->instruction_address());
  }
  return NULL; // not a call
}

void CompiledMethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
  if (is_zombie()) {
    return;
  }

  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}

// Clear ICStubs of all compiled ICs
void CompiledMethod::clear_ic_stubs() {
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");
  ResourceMark rm;
  RelocIterator iter(this);
  while(iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(&iter);
      ic->clear_ic_stub();
    }
  }
}

#ifdef ASSERT
// Check that the class_loader is alive for this bit of metadata.
static void check_class(Metadata* md) {
   Klass* klass = NULL;
   if (md->is_klass()) {
     klass = ((Klass*)md);
   } else if (md->is_method()) {
     klass = ((Method*)md)->method_holder();
   } else if (md->is_methodData()) {
     klass = ((MethodData*)md)->method()->method_holder();
   } else {
     md->print();
     ShouldNotReachHere();
   }
   assert(klass->is_loader_alive(), "must be alive");
}
#endif // ASSERT


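// Returns true if the IC is already clean or its cached metadata is still
// alive. Otherwise the IC is stale and we attempt to clean it; that attempt
// can fail (returning false) when cleaning needs an ICStub and the ICStub
// buffer is exhausted, in which case the caller must refill and retry.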
bool CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {
  if (ic->is_clean()) {
    return true;
  }
  if (ic->is_icholder_call()) {
    // The only exception is compiledICHolder metadata which may
    // yet be marked below. (We check this further below.)
    CompiledICHolder* cichk_metadata = ic->cached_icholder();

    if (cichk_metadata->is_loader_alive()) {
      return true;
    }
  } else {
    Metadata* ic_metadata = ic->cached_metadata();
    if (ic_metadata != NULL) {
      if (ic_metadata->is_klass()) {
        if (((Klass*)ic_metadata)->is_loader_alive()) {
          return true;
        }
      } else if (ic_metadata->is_method()) {
        Method* method = (Method*)ic_metadata;
        assert(!method->is_old(), "old method should have been cleaned");
        if (method->method_holder()->is_loader_alive()) {
          return true;
        }
      } else {
        ShouldNotReachHere();
      }
    }
  }

  return ic->set_to_clean();
}

// static_stub_Relocations may have dangling references to
// nmethods so trim them out here.  Otherwise it looks like
// compiled code is maintaining a link to dead metadata.
// Note that the body is compiled in debug builds only; in
// product builds this is a no-op.
void CompiledMethod::clean_ic_stubs() {
#ifdef ASSERT
  address low_boundary = oops_reloc_begin();
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    address static_call_addr = NULL;
    if (iter.type() == relocInfo::opt_virtual_call_type) {
      CompiledIC* cic = CompiledIC_at(&iter);
      if (!cic->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    } else if (iter.type() == relocInfo::static_call_type) {
      CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
      if (!csc->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    }
    if (static_call_addr != NULL) {
      RelocIterator sciter(this, low_boundary);
      while (sciter.next()) {
        if (sciter.type() == relocInfo::static_stub_type &&
            sciter.static_stub_reloc()->static_call() == static_call_addr) {
          sciter.static_stub_reloc()->clear_inline_cache();
        }
      }
    }
  }
#endif
}

// Clean references to unloaded nmethods at addr from this one, which is not unloaded.
template <class CompiledICorStaticCall>
static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from,
                                         bool clean_all) {
  // OK to look up references to zombies here
  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
  CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
  if (nm != NULL) {
    // Clean inline caches pointing to both zombie and not_entrant methods
    if (clean_all || !nm->is_in_use() || nm->is_unloading() || (nm->method()->code() != nm)) {
      if (!ic->set_to_clean(from->is_alive())) {
        return false;
      }
      assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
    }
  }
  return true;
}

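// Convenience overloads that extract the destination address from the inline
// cache or the static call and forward to the template above.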
static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from,
                                         bool clean_all) {
  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from, clean_all);
}

static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from,
                                         bool clean_all) {
  return clean_if_nmethod_is_unloaded(csc, csc->destination(), from, clean_all);
}

// Cleans caches in nmethods that point to either classes that are unloaded
// or nmethods that are unloaded.
//
// Returns false if cleaning failed, i.e. the ICStub buffer was exhausted;
// the caller is then expected to refill ICStubs and retry.
bool CompiledMethod::unload_nmethod_caches(bool unloading_occurred) {
  ResourceMark rm;

  // The exception cache only needs to be cleaned if unloading occurred
  if (unloading_occurred) {
    clean_exception_cache();
  }

  if (!cleanup_inline_caches_impl(unloading_occurred, false)) {
    return false;
  }

  // All static stubs need to be cleaned.
  clean_ic_stubs();

  // Check that the metadata embedded in the nmethod is alive
  DEBUG_ONLY(metadata_do(check_class));
  return true;
}

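// Retry loop for inline cache cleaning: if cleaning fails because the
// InlineCacheBuffer has run out of ICStubs, release the CompiledICLocker,
// refill the stubs, and try again.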
void CompiledMethod::cleanup_inline_caches(bool clean_all) {
  for (;;) {
    ICRefillVerifier ic_refill_verifier;
    { CompiledICLocker ic_locker(this);
      if (cleanup_inline_caches_impl(false, clean_all)) {
        return;
      }
    }
    InlineCacheBuffer::refill_ic_stubs();
  }
}

// Called to clean up after class unloading for live nmethods, and from the sweeper
// for all methods.
bool CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) {
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");
  ResourceMark rm;

  // Find all calls in an nmethod and clear the ones that point to non-entrant,
  // zombie and unloaded nmethods.
  RelocIterator iter(this, oops_reloc_begin());
  while(iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first clear ICs where the cached metadata
        // is referring to an unloaded klass or method.
        if (!clean_ic_if_metadata_is_dead(CompiledIC_at(&iter))) {
          return false;
        }
      }

      if (!clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all)) {
        return false;
      }
      break;

    case relocInfo::opt_virtual_call_type:
      if (!clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all)) {
        return false;
      }
      break;

    case relocInfo::static_call_type:
      if (!clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, clean_all)) {
        return false;
      }
      break;

    default:
      break;
    }
  }

  return true;
}

// Iterating over all nmethods, e.g. with the help of CodeCache::nmethods_do(fun),
// was found not to be inherently safe. There is a chance that fields are seen
// which are not properly initialized. This happens despite the fact that
// nmethods_do() asserts the CodeCache_lock to be held.
// To bundle knowledge about necessary checks in one place, this function was introduced.
// It is not claimed that these checks are sufficient, but they were found to be necessary.
bool CompiledMethod::nmethod_access_is_safe(nmethod* nm) {
  Method* method = (nm == NULL) ? NULL : nm->method();  // nm->method() may be uninitialized, i.e. != NULL, but invalid
  return (nm != NULL) && (method != NULL) && (method->signature() != NULL) &&
         !nm->is_zombie() && !nm->is_not_installed() &&
         os::is_readable_pointer(method) &&
         os::is_readable_pointer(method->constants()) &&
         os::is_readable_pointer(method->signature());
}