/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/compiledIC.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/scopeDesc.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/gcBehaviours.hpp"
#include "interpreter/bytecode.inline.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout,
                               int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps,
                               bool caller_must_gc_arguments)
  : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
    _mark_for_deoptimization_status(not_marked),
    _method(method),
    _gc_data(NULL)
{
  init_defaults();
}

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size,
                               int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size,
                               OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb,
             frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
    _mark_for_deoptimization_status(not_marked),
    _method(method),
    _gc_data(NULL)
{
  init_defaults();
}

void CompiledMethod::init_defaults() {
  _has_unsafe_access         = 0;
  _has_method_handle_invokes = 0;
  _lazy_critical_native      = 0;
  _has_wide_vectors          = 0;
}

bool CompiledMethod::is_method_handle_return(address return_pc) {
  if (!has_method_handle_invokes())  return false;
  PcDesc* pd = pc_desc_at(return_pc);
  if (pd == NULL)
    return false;
  return pd->is_method_handle_invoke();
}

// Returns a string version of the method state.
const char* CompiledMethod::state() const {
  int state = get_state();
  switch (state) {
  case not_installed:
    return "not installed";
  case in_use:
    return "in use";
  case not_used:
    return "not_used";
  case not_entrant:
    return "not_entrant";
  case zombie:
    return "zombie";
  case unloaded:
    return "unloaded";
  default:
    fatal("unexpected method state: %d", state);
    return NULL;
  }
}

//-----------------------------------------------------------------------------

ExceptionCache* CompiledMethod::exception_cache_acquire() const {
  return OrderAccess::load_acquire(&_exception_cache);
}

void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(), "Must hold the ExceptionCache_lock");
  assert(new_entry != NULL, "Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

  for (;;) {
    ExceptionCache *ec = exception_cache();
    if (ec != NULL) {
      Klass* ex_klass = ec->exception_type();
      if (!ex_klass->is_loader_alive()) {
        // We must guarantee that entries are not inserted with new next pointer
        // edges to ExceptionCache entries with dead klasses, due to bad interactions
        // with concurrent ExceptionCache cleanup. Therefore, the inserts roll
        // the head pointer forward to the first live ExceptionCache, so that the new
        // next pointers always point at live ExceptionCaches that are not removed due
        // to concurrent ExceptionCache cleanup.
        ExceptionCache* next = ec->next();
        if (Atomic::cmpxchg(next, &_exception_cache, ec) == ec) {
          CodeCache::release_exception_cache(ec);
        }
        continue;
      }
      ec = exception_cache();
      if (ec != NULL) {
        new_entry->set_next(ec);
      }
    }
    if (Atomic::cmpxchg(new_entry, &_exception_cache, ec) == ec) {
      return;
    }
  }
}

void CompiledMethod::clean_exception_cache() {
  // For each nmethod, only a single thread may call this cleanup function
  // at the same time, whether called in STW cleanup or concurrent cleanup.
  // Note that if the GC is processing exception cache cleaning in a concurrent phase,
  // then a single writer may contend with cleaning up the head pointer to the
  // first ExceptionCache node that has a Klass* that is alive. That is fine,
  // as long as there is no concurrent cleanup of next pointers from concurrent writers.
  // And the concurrent writers do not clean up next pointers, only the head.
  // Also note that concurrent readers will walk through Klass* pointers that are not
  // alive. That does not cause ABA problems, because Klass* is deleted after
  // a handshake with all threads, after all stale ExceptionCaches have been
  // unlinked. That is also when the CodeCache::exception_cache_purge_list()
  // is deleted, with all ExceptionCache entries that were cleaned concurrently.
  // That similarly implies that CAS operations on ExceptionCache entries do not
  // suffer from ABA problems, as unlinking and deletion are separated by a global
  // handshake operation.
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache_acquire();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    if (!curr->exception_type()->is_loader_alive()) {
      if (prev == NULL) {
        // Try to clean the head; this is contended by concurrent inserts that
        // both lazily clean the head and insert entries at the head.
If 168 // the CAS fails, the operation is restarted. 169 if (Atomic::cmpxchg(next, &_exception_cache, curr) != curr) { 170 prev = NULL; 171 curr = exception_cache_acquire(); 172 continue; 173 } 174 } else { 175 // It is impossible to during cleanup connect the next pointer to 176 // an ExceptionCache that has not been published before a safepoint 177 // prior to the cleanup. Therefore, release is not required. 178 prev->set_next(next); 179 } 180 // prev stays the same. 181 182 CodeCache::release_exception_cache(curr); 183 } else { 184 prev = curr; 185 } 186 187 curr = next; 188 } 189 } 190 191 // public method for accessing the exception cache 192 // These are the public access methods. 193 address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) { 194 // We never grab a lock to read the exception cache, so we may 195 // have false negatives. This is okay, as it can only happen during 196 // the first few exception lookups for a given nmethod. 197 ExceptionCache* ec = exception_cache_acquire(); 198 while (ec != NULL) { 199 address ret_val; 200 if ((ret_val = ec->match(exception,pc)) != NULL) { 201 return ret_val; 202 } 203 ec = ec->next(); 204 } 205 return NULL; 206 } 207 208 void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) { 209 // There are potential race conditions during exception cache updates, so we 210 // must own the ExceptionCache_lock before doing ANY modifications. Because 211 // we don't lock during reads, it is possible to have several threads attempt 212 // to update the cache with the same data. We need to check for already inserted 213 // copies of the current data before adding it. 214 215 MutexLocker ml(ExceptionCache_lock); 216 ExceptionCache* target_entry = exception_cache_entry_for_exception(exception); 217 218 if (target_entry == NULL || !target_entry->add_address_and_handler(pc,handler)) { 219 target_entry = new ExceptionCache(exception,pc,handler); 220 add_exception_cache_entry(target_entry); 221 } 222 } 223 224 // private method for handling exception cache 225 // These methods are private, and used to manipulate the exception cache 226 // directly. 
ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}

//-------------end of code for ExceptionCache--------------

bool CompiledMethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}

bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}

void CompiledMethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
  RelocIterator iter(this, NULL, NULL);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}

ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

ScopeDesc* CompiledMethod::scope_desc_near(address pc) {
  PcDesc* pd = pc_desc_near(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

address CompiledMethod::oops_reloc_begin() const {
  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes. If an oop in the old code was there, that oop
  // should not get GC'd. Skip the first few bytes of oops on
  // not-entrant methods.
  if (frame_complete_offset() != CodeOffsets::frame_never_safe &&
      code_begin() + frame_complete_offset() >
      verified_entry_point() + NativeJump::instruction_size)
  {
    // If we have a frame_complete_offset after the native jump, then there
    // is no point trying to look for oops before that. This is a requirement
    // for being allowed to scan oops concurrently.
    return code_begin() + frame_complete_offset();
  }

  // It is not safe to read oops concurrently using entry barriers, if their
  // location depends on whether the nmethod is entrant or not.
  assert(BarrierSet::barrier_set()->barrier_set_nmethod() == NULL, "Not safe oop scan");

  address low_boundary = verified_entry_point();
  if (!is_in_use() && is_nmethod()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }
  return low_boundary;
}

int CompiledMethod::verify_icholder_relocations() {
  ResourceMark rm;
  int count = 0;

  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc(), this)) {
        CompiledIC *ic = CompiledIC_at(&iter);
        if (TraceCompiledIC) {
          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
          ic->print();
        }
        assert(ic->cached_icholder() != NULL, "must be non-NULL");
        count++;
      }
    }
  }

  return count;
}

// Method that knows how to preserve outgoing arguments at a call. This method must be
// called with a frame corresponding to a Java invoke.
void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
  if (method() != NULL && !method()->is_native()) {
    address pc = fr.pc();
    SimpleScopeDesc ssd(this, pc);
    Bytecode_invoke call(ssd.method(), ssd.bci());
    bool has_receiver = call.has_receiver();
    bool has_appendix = call.has_appendix();
    Symbol* signature = call.signature();

    // The method attached by JIT-compilers should be used, if present.
    // Bytecode can be inaccurate in such a case.
    Method* callee = attached_method_before_pc(pc);
    if (callee != NULL) {
      has_receiver = !(callee->access_flags().is_static());
      has_appendix = false;
      signature = callee->signature();
    }

    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
  }
}

Method* CompiledMethod::attached_method(address call_instr) {
  assert(code_contains(call_instr), "not part of the nmethod");
  RelocIterator iter(this, call_instr, call_instr + 1);
  while (iter.next()) {
    if (iter.addr() == call_instr) {
      switch (iter.type()) {
        case relocInfo::static_call_type:      return iter.static_call_reloc()->method_value();
        case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
        case relocInfo::virtual_call_type:     return iter.virtual_call_reloc()->method_value();
        default:                               break;
      }
    }
  }
  return NULL; // not found
}

Method* CompiledMethod::attached_method_before_pc(address pc) {
  if (NativeCall::is_call_before(pc)) {
    NativeCall* ncall = nativeCall_before(pc);
    return attached_method(ncall->instruction_address());
  }
  return NULL; // not a call
}

void CompiledMethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
  if (is_zombie()) {
    return;
  }

  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}

// Clear IC callsites, releasing ICStubs of all compiled ICs
// as well as any associated CompiledICHolders.
void CompiledMethod::clear_ic_callsites() {
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");
  ResourceMark rm;
  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(&iter);
      ic->set_to_clean(false);
    }
  }
}

#ifdef ASSERT
// Check that the class_loader is alive for this bit of metadata.
class CheckClass : public MetadataClosure {
  void do_metadata(Metadata* md) {
    Klass* klass = NULL;
    if (md->is_klass()) {
      klass = ((Klass*)md);
    } else if (md->is_method()) {
      klass = ((Method*)md)->method_holder();
    } else if (md->is_methodData()) {
      klass = ((MethodData*)md)->method()->method_holder();
    } else {
      md->print();
      ShouldNotReachHere();
    }
    assert(klass->is_loader_alive(), "must be alive");
  }
};
#endif // ASSERT


bool CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {
  if (ic->is_clean()) {
    return true;
  }
  if (ic->is_icholder_call()) {
    // The only exception is CompiledICHolder metadata, which may
    // yet be marked below (we check this further below).
    CompiledICHolder* cichk_metadata = ic->cached_icholder();

    if (cichk_metadata->is_loader_alive()) {
      return true;
    }
  } else {
    Metadata* ic_metadata = ic->cached_metadata();
    if (ic_metadata != NULL) {
      if (ic_metadata->is_klass()) {
        if (((Klass*)ic_metadata)->is_loader_alive()) {
          return true;
        }
      } else if (ic_metadata->is_method()) {
        Method* method = (Method*)ic_metadata;
        assert(!method->is_old(), "old method should have been cleaned");
        if (method->method_holder()->is_loader_alive()) {
          return true;
        }
      } else {
        ShouldNotReachHere();
      }
    }
  }

  return ic->set_to_clean();
}

// Clean references to unloaded nmethods at addr from this one, which is not unloaded.
template <class CompiledICorStaticCall>
static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from,
                                         bool clean_all) {
  // OK to look up references to zombies here
  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
  CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
  if (nm != NULL) {
    // Clean inline caches pointing to both zombie and not_entrant methods
    if (clean_all || !nm->is_in_use() || nm->is_unloading() || (nm->method()->code() != nm)) {
      if (!ic->set_to_clean(from->is_alive())) {
        return false;
      }
      assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
    }
  }
  return true;
}

static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from,
                                         bool clean_all) {
  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from, clean_all);
}

static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from,
                                         bool clean_all) {
  return clean_if_nmethod_is_unloaded(csc, csc->destination(), from, clean_all);
}

// Cleans caches in nmethods that point to either classes that are unloaded
// or nmethods that are unloaded.
//
// Can be called either in parallel by G1 currently or after all
// nmethods are unloaded. Return postponed=true in the parallel case for
// inline caches found that point to nmethods that are not yet visited during
// the do_unloading walk.
bool CompiledMethod::unload_nmethod_caches(bool unloading_occurred) {
  ResourceMark rm;

  // Exception cache cleaning only needs to be done if unloading occurred
  if (unloading_occurred) {
    clean_exception_cache();
  }

  if (!cleanup_inline_caches_impl(unloading_occurred, false)) {
    return false;
  }

#ifdef ASSERT
  // Check that the metadata embedded in the nmethod is alive
  CheckClass check_class;
  metadata_do(&check_class);
#endif
  return true;
}

void CompiledMethod::cleanup_inline_caches(bool clean_all) {
  for (;;) {
    ICRefillVerifier ic_refill_verifier;
    { CompiledICLocker ic_locker(this);
      if (cleanup_inline_caches_impl(false, clean_all)) {
        return;
      }
    }
    InlineCacheBuffer::refill_ic_stubs();
  }
}

// Called to clean up after class unloading for live nmethods and from the sweeper
// for all methods.
bool CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) {
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");
  ResourceMark rm;

  // Find all calls in an nmethod and clear the ones that point to non-entrant,
  // zombie and unloaded nmethods.
  RelocIterator iter(this, oops_reloc_begin());
  bool is_in_static_stub = false;
  while (iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first clear ICs where the cached metadata
        // is referring to an unloaded klass or method.
        if (!clean_ic_if_metadata_is_dead(CompiledIC_at(&iter))) {
          return false;
        }
      }

      if (!clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all)) {
        return false;
      }
      break;

    case relocInfo::opt_virtual_call_type:
      if (!clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all)) {
        return false;
      }
      break;

    case relocInfo::static_call_type:
      if (!clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, clean_all)) {
        return false;
      }
      break;

    case relocInfo::static_stub_type: {
      is_in_static_stub = true;
      break;
    }

    case relocInfo::metadata_type: {
      // Only the metadata relocations contained in static/opt virtual call stubs
      // contain the Method* passed to c2i adapters. It is the only metadata
      // relocation that needs to be walked, as it is the one metadata relocation
      // that violates the invariant that all metadata relocations have an oop
      // in the compiled method (due to deferred resolution and code patching).

      // This causes dead metadata to remain in compiled methods that are not
      // unloading. Unless these slippery metadata relocations of the static
      // stubs are at least cleared, subsequent class redefinition operations
      // will access potentially free memory, and JavaThread execution
      // concurrent to class unloading may call c2i adapters with dead methods.
      if (!is_in_static_stub) {
        // The first metadata relocation after a static stub relocation is the
        // metadata relocation of the static stub used to pass the Method* to
        // c2i adapters.
        continue;
      }
      is_in_static_stub = false;
      metadata_Relocation* r = iter.metadata_reloc();
      Metadata* md = r->metadata_value();
      if (md != NULL && md->is_method()) {
        Method* method = static_cast<Method*>(md);
        if (!method->method_holder()->is_loader_alive()) {
          Atomic::store((Method*)NULL, r->metadata_addr());

          if (!r->metadata_is_immediate()) {
            r->fix_metadata_relocation();
          }
        }
      }
      break;
    }

    default:
      break;
    }
  }

  return true;
}

// Iterating over all nmethods, e.g. with the help of CodeCache::nmethods_do(fun), was found
// not to be inherently safe. There is a chance that fields are seen which are not properly
// initialized. This happens despite the fact that nmethods_do() asserts that the CodeCache_lock
// is held.
// To bundle knowledge about the necessary checks in one place, this function was introduced.
// It is not claimed that these checks are sufficient, but they were found to be necessary.
bool CompiledMethod::nmethod_access_is_safe(nmethod* nm) {
  Method* method = (nm == NULL) ? NULL : nm->method();  // nm->method() may be uninitialized, i.e. != NULL, but invalid
  return (nm != NULL) && (method != NULL) && (method->signature() != NULL) &&
         !nm->is_zombie() && !nm->is_not_installed() &&
         os::is_readable_pointer(method) &&
         os::is_readable_pointer(method->constants()) &&
         os::is_readable_pointer(method->signature());
}

class HasEvolDependency : public MetadataClosure {
  bool _has_evol_dependency;
 public:
  HasEvolDependency() : _has_evol_dependency(false) {}
  void do_metadata(Metadata* md) {
    if (md->is_method()) {
      Method* method = (Method*)md;
      if (method->is_old()) {
        _has_evol_dependency = true;
      }
    }
  }
  bool has_evol_dependency() const { return _has_evol_dependency; }
};

bool CompiledMethod::has_evol_metadata() {
  // Check the metadata in relocIter and CompiledIC, and also deoptimize
  // any nmethod that has a reference to old methods.
  HasEvolDependency check_evol;
  metadata_do(&check_evol);
  if (check_evol.has_evol_dependency() && log_is_enabled(Debug, redefine, class, nmethod)) {
    ResourceMark rm;
    log_debug(redefine, class, nmethod)
            ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d in nmethod metadata",
             _method->method_holder()->external_name(),
             _method->name()->as_C_string(),
             _method->signature()->as_C_string(),
             compile_id());
  }
  return check_evol.has_evol_dependency();
}