/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/compiledIC.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/exceptionHandlerTable.hpp"
#include "code/scopeDesc.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/gcBehaviours.hpp"
#include "interpreter/bytecode.inline.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/sharedRuntime.hpp"

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout,
                               int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps,
                               bool caller_must_gc_arguments)
  : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
    _mark_for_deoptimization_status(not_marked),
    _method(method),
    _gc_data(NULL)
{
  init_defaults();
}

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size,
                               int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size,
                               OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb,
             frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
    _mark_for_deoptimization_status(not_marked),
    _method(method),
    _gc_data(NULL)
{
  init_defaults();
}

void CompiledMethod::init_defaults() {
  _has_unsafe_access         = 0;
  _has_method_handle_invokes = 0;
  _lazy_critical_native      = 0;
  _has_wide_vectors          = 0;
}

bool CompiledMethod::is_method_handle_return(address return_pc) {
  if (!has_method_handle_invokes())  return false;
  PcDesc* pd = pc_desc_at(return_pc);
  if (pd == NULL)
    return false;
  return pd->is_method_handle_invoke();
}

// Returns a string version of the method state.
const char* CompiledMethod::state() const {
  int state = get_state();
  switch (state) {
  case not_installed:
    return "not installed";
  case in_use:
    return "in use";
  case not_used:
    return "not_used";
  case not_entrant:
    return "not_entrant";
  case zombie:
    return "zombie";
  case unloaded:
    return "unloaded";
  default:
    fatal("unexpected method state: %d", state);
    return NULL;
  }
}

//-----------------------------------------------------------------------------
void CompiledMethod::mark_for_deoptimization(bool inc_recompile_counts) {
  MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock,
                 Mutex::_no_safepoint_check_flag);
  _mark_for_deoptimization_status = (inc_recompile_counts ? deoptimize : deoptimize_noupdate);
}

//-----------------------------------------------------------------------------

ExceptionCache* CompiledMethod::exception_cache_acquire() const {
  return Atomic::load_acquire(&_exception_cache);
}

void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(), "Must hold the ExceptionCache_lock");
  assert(new_entry != NULL, "Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

  for (;;) {
    ExceptionCache *ec = exception_cache();
    if (ec != NULL) {
      Klass* ex_klass = ec->exception_type();
      if (!ex_klass->is_loader_alive()) {
        // We must guarantee that entries are not inserted with new next pointer
        // edges to ExceptionCache entries with dead klasses, due to bad interactions
        // with concurrent ExceptionCache cleanup. Therefore, the inserts roll
        // the head pointer forward to the first live ExceptionCache, so that the new
        // next pointers always point at live ExceptionCaches that are not removed by
        // concurrent ExceptionCache cleanup.
        ExceptionCache* next = ec->next();
        if (Atomic::cmpxchg(&_exception_cache, ec, next) == ec) {
          CodeCache::release_exception_cache(ec);
        }
        continue;
      }
      ec = exception_cache();
      if (ec != NULL) {
        new_entry->set_next(ec);
      }
    }
    if (Atomic::cmpxchg(&_exception_cache, ec, new_entry) == ec) {
      return;
    }
  }
}

void CompiledMethod::clean_exception_cache() {
  // For each nmethod, only a single thread may call this cleanup function
  // at the same time, whether called in STW cleanup or concurrent cleanup.
  // Note that if the GC is processing exception cache cleaning in a concurrent phase,
  // then a single writer may contend with cleaning up the head pointer to the
  // first ExceptionCache node that has a Klass* that is alive. That is fine,
  // as long as there is no concurrent cleanup of next pointers from concurrent writers.
  // And the concurrent writers do not clean up next pointers, only the head.
  // Also note that concurrent readers will walk through Klass* pointers that are not
  // alive. That does not cause ABA problems, because Klass* is deleted after
  // a handshake with all threads, after all stale ExceptionCaches have been
  // unlinked. That is also when the CodeCache::exception_cache_purge_list()
  // is deleted, with all ExceptionCache entries that were cleaned concurrently.
  // That similarly implies that CAS operations on ExceptionCache entries do not
  // suffer from ABA problems, as unlinking and deletion are separated by a global
  // handshake operation.
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache_acquire();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    if (!curr->exception_type()->is_loader_alive()) {
      if (prev == NULL) {
        // Try to clean head; this is contended by concurrent inserts, that
        // both lazily clean the head, and insert entries at the head. If
        // the CAS fails, the operation is restarted.
        if (Atomic::cmpxchg(&_exception_cache, curr, next) != curr) {
          prev = NULL;
          curr = exception_cache_acquire();
          continue;
        }
      } else {
        // During cleanup, it is impossible to connect the next pointer to an
        // ExceptionCache that was not published before a safepoint prior to
        // the cleanup. Therefore, a release store is not required.
        prev->set_next(next);
      }
      // prev stays the same.

      CodeCache::release_exception_cache(curr);
    } else {
      prev = curr;
    }

    curr = next;
  }
}

// These are the public access methods for the exception cache.
address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception, pc)) != NULL) {
      return ret_val;
    }
    ec = ec->next();
  }
  return NULL;
}

void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == NULL || !target_entry->add_address_and_handler(pc, handler)) {
    target_entry = new ExceptionCache(exception, pc, handler);
    add_exception_cache_entry(target_entry);
  }
}
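// Illustrative only -- a sketch of how a runtime caller might combine the two
// entry points above when dispatching an exception. compute_exception_handler
// is a hypothetical placeholder for the slow-path lookup, not a function in
// this file:
//
//   address handler = cm->handler_for_exception_and_pc(exception, ret_pc);
//   if (handler == NULL) {
//     handler = compute_exception_handler(cm, exception, ret_pc);  // hypothetical slow path
//     if (handler != NULL) {
//       cm->add_handler_for_exception_and_pc(exception, ret_pc, handler);
//     }
//   }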
// These methods are private, and used to manipulate the exception cache
// directly.
ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}

//-------------end of code for ExceptionCache--------------

bool CompiledMethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}


bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}

void CompiledMethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
  RelocIterator iter(this, NULL, NULL);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}


ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

ScopeDesc* CompiledMethod::scope_desc_near(address pc) {
  PcDesc* pd = pc_desc_near(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

address CompiledMethod::oops_reloc_begin() const {
  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes. If an oop in the old code was there, that oop
  // should not get GC'd. Skip the first few bytes of oops on
  // not-entrant methods.
  if (frame_complete_offset() != CodeOffsets::frame_never_safe &&
      code_begin() + frame_complete_offset() >
      verified_entry_point() + NativeJump::instruction_size)
  {
    // If we have a frame_complete_offset after the native jump, then there
    // is no point trying to look for oops before that. This is a requirement
    // for being allowed to scan oops concurrently.
    return code_begin() + frame_complete_offset();
  }

  // It is not safe to read oops concurrently using entry barriers, if their
  // location depends on whether the nmethod is entrant or not.
  assert(BarrierSet::barrier_set()->barrier_set_nmethod() == NULL, "Not safe oop scan");

  address low_boundary = verified_entry_point();
  if (!is_in_use() && is_nmethod()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }
  return low_boundary;
}

int CompiledMethod::verify_icholder_relocations() {
  ResourceMark rm;
  int count = 0;

  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc(), this)) {
        CompiledIC *ic = CompiledIC_at(&iter);
        if (TraceCompiledIC) {
          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
          ic->print();
        }
        assert(ic->cached_icholder() != NULL, "must be non-NULL");
        count++;
      }
    }
  }

  return count;
}

// Method that knows how to preserve outgoing arguments at call. This method must be
// called with a frame corresponding to a Java invoke.
void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
  if (method() != NULL && !method()->is_native()) {
    address pc = fr.pc();
    SimpleScopeDesc ssd(this, pc);
    Bytecode_invoke call(methodHandle(Thread::current(), ssd.method()), ssd.bci());
    bool has_receiver = call.has_receiver();
    bool has_appendix = call.has_appendix();
    Symbol* signature = call.signature();

    // The method attached by JIT-compilers should be used, if present.
    // Bytecode can be inaccurate in such a case.
    Method* callee = attached_method_before_pc(pc);
    if (callee != NULL) {
      has_receiver = !(callee->access_flags().is_static());
      has_appendix = false;
      signature   = callee->signature();
    }

    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
  }
}

Method* CompiledMethod::attached_method(address call_instr) {
  assert(code_contains(call_instr), "not part of the nmethod");
  RelocIterator iter(this, call_instr, call_instr + 1);
  while (iter.next()) {
    if (iter.addr() == call_instr) {
      switch (iter.type()) {
      case relocInfo::static_call_type:      return iter.static_call_reloc()->method_value();
      case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
      case relocInfo::virtual_call_type:     return iter.virtual_call_reloc()->method_value();
      default:                               break;
      }
    }
  }
  return NULL; // not found
}

Method* CompiledMethod::attached_method_before_pc(address pc) {
  if (NativeCall::is_call_before(pc)) {
    NativeCall* ncall = nativeCall_before(pc);
    return attached_method(ncall->instruction_address());
  }
  return NULL; // not a call
}

void CompiledMethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
  if (is_zombie()) {
    return;
  }

  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}

// Clear IC callsites, releasing ICStubs of all compiled ICs
// as well as any associated CompiledICHolders.
void CompiledMethod::clear_ic_callsites() {
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");
  ResourceMark rm;
  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(&iter);
      ic->set_to_clean(false);
    }
  }
}
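// Illustrative only -- callers of clear_ic_callsites() are expected to hold the
// CompiledICLocker for this method, mirroring the pattern used by
// cleanup_inline_caches() below (cm is assumed to be a CompiledMethod*):
//
//   {
//     CompiledICLocker ml(cm);
//     cm->clear_ic_callsites();
//   }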
#ifdef ASSERT
// Check class_loader is alive for this bit of metadata.
class CheckClass : public MetadataClosure {
  void do_metadata(Metadata* md) {
    Klass* klass = NULL;
    if (md->is_klass()) {
      klass = ((Klass*)md);
    } else if (md->is_method()) {
      klass = ((Method*)md)->method_holder();
    } else if (md->is_methodData()) {
      klass = ((MethodData*)md)->method()->method_holder();
    } else {
      md->print();
      ShouldNotReachHere();
    }
    assert(klass->is_loader_alive(), "must be alive");
  }
};
#endif // ASSERT


bool CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {
  if (ic->is_clean()) {
    return true;
  }
  if (ic->is_icholder_call()) {
    // The only exception is CompiledICHolder metadata which may
    // yet be marked below. (We check this further below.)
    CompiledICHolder* cichk_metadata = ic->cached_icholder();

    if (cichk_metadata->is_loader_alive()) {
      return true;
    }
  } else {
    Metadata* ic_metadata = ic->cached_metadata();
    if (ic_metadata != NULL) {
      if (ic_metadata->is_klass()) {
        if (((Klass*)ic_metadata)->is_loader_alive()) {
          return true;
        }
      } else if (ic_metadata->is_method()) {
        Method* method = (Method*)ic_metadata;
        assert(!method->is_old(), "old method should have been cleaned");
        if (method->method_holder()->is_loader_alive()) {
          return true;
        }
      } else {
        ShouldNotReachHere();
      }
    }
  }

  return ic->set_to_clean();
}

// Clean references to unloaded nmethods at addr from this one, which is not unloaded.
template <class CompiledICorStaticCall>
static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from,
                                         bool clean_all) {
  // OK to look up references to zombies here
  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
  CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
  if (nm != NULL) {
    // Clean inline caches pointing to both zombie and not_entrant methods
    if (clean_all || !nm->is_in_use() || nm->is_unloading() || (nm->method()->code() != nm)) {
      // Inline cache cleaning should only be initiated on CompiledMethods that have been
      // observed to be is_alive(). However, with concurrent code cache unloading, it is
      // possible that by now, the state has become !is_alive. This can happen in two ways:
      // 1) It can be racingly flipped to unloaded if the nmethod being cleaned (from the
      // sweeper) is_unloading(). This is fine, because if that happens, then the inline
      // caches have already been cleaned under the same CompiledICLocker that we now hold during
      // inline cache cleaning, and we will simply walk the inline caches again, and likely not
      // find much of interest to clean. However, this race prevents us from asserting that the
      // nmethod is_alive(). The is_unloading() function is completely monotonic; once set due
      // to an oop dying, it remains set forever until freed. Because of that, all unloaded
      // nmethods are is_unloading(), but notably, an unloaded nmethod may also subsequently
      // become zombie (when the sweeper converts it to zombie).
      // 2) It can be racingly flipped to zombie if the nmethod being cleaned (by the concurrent
      // GC) cleans a zombie nmethod that is concurrently made zombie by the sweeper. In this
      // scenario, the sweeper will first transition the nmethod to zombie, and then when
      // unregistering from the GC, it will wait until the GC is done.
      // The GC will then clean the inline caches *with IC stubs*, even though no IC stubs
      // are needed. This is fine, as long as the IC stubs are guaranteed to be released
      // before the next safepoint, where IC finalization requires live IC stubs to not be
      // associated with zombie nmethods. This is guaranteed, because the sweeper does not
      // have a single safepoint check until after it completes the whole transition function;
      // it will wake up after the GC is done with concurrent code cache cleaning (which
      // blocks out safepoints using the suspendible threads set), and then call
      // clear_ic_callsites, which will release the associated IC stubs, before a subsequent
      // safepoint poll can be reached. This guarantees that the spuriously created IC stubs
      // are released appropriately before IC finalization in a safepoint gets to run.
      // Therefore, this race is fine. This is also valid in a scenario where an inline cache
      // of a zombie nmethod gets a spurious IC stub, and then when cleaning another inline
      // cache, fails to request an IC stub because we exhausted the IC stub buffer. In this
      // scenario, the GC will request a safepoint after yielding the suspendible thread set,
      // effectively unblocking safepoints. Before such a safepoint can be reached, the
      // sweeper similarly has to wake up, clear the IC stubs, and reach the next safepoint
      // poll, after the whole transition function has completed. Due to the various races
      // that can cause an nmethod to first be is_alive() and then racingly become
      // !is_alive(), it is unfortunately not possible to assert the nmethod is_alive(),
      // !is_unloaded() or !is_zombie() here.
      if (!ic->set_to_clean(!from->is_unloading())) {
        return false;
      }
      assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
    }
  }
  return true;
}

static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from,
                                         bool clean_all) {
  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from, clean_all);
}

static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from,
                                         bool clean_all) {
  return clean_if_nmethod_is_unloaded(csc, csc->destination(), from, clean_all);
}

// Cleans caches in nmethods that point to either classes that are unloaded
// or nmethods that are unloaded.
//
// Can be called either in parallel by G1 currently or after all
// nmethods are unloaded. Returns false when cleaning could not complete
// (e.g. because IC stubs ran out) and must be retried.
bool CompiledMethod::unload_nmethod_caches(bool unloading_occurred) {
  ResourceMark rm;

  // The exception cache only needs to be cleaned if unloading occurred
  if (unloading_occurred) {
    clean_exception_cache();
  }

  if (!cleanup_inline_caches_impl(unloading_occurred, false)) {
    return false;
  }

#ifdef ASSERT
  // Check that the metadata embedded in the nmethod is alive
  CheckClass check_class;
  metadata_do(&check_class);
#endif
  return true;
}

void CompiledMethod::run_nmethod_entry_barrier() {
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (bs_nm != NULL) {
    // We want to keep an invariant that nmethods found through iterations of a Thread's
    // nmethods in safepoints have gone through an entry barrier and are not armed.
    // By calling this nmethod entry barrier, it plays along and acts
    // like any other nmethod found on the stack of a thread (fewer surprises).
    nmethod* nm = as_nmethod_or_null();
    if (nm != NULL) {
      bool alive = bs_nm->nmethod_entry_barrier(nm);
      assert(alive, "should be alive");
    }
  }
}

void CompiledMethod::cleanup_inline_caches(bool clean_all) {
  for (;;) {
    ICRefillVerifier ic_refill_verifier;
    { CompiledICLocker ic_locker(this);
      if (cleanup_inline_caches_impl(false, clean_all)) {
        return;
      }
    }
    // Call this nmethod entry barrier from the sweeper.
    run_nmethod_entry_barrier();
    InlineCacheBuffer::refill_ic_stubs();
  }
}

// Called to clean up after class unloading for live nmethods and from the sweeper
// for all methods.
bool CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) {
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");
  ResourceMark rm;

  // Find all calls in an nmethod and clear the ones that point to non-entrant,
  // zombie and unloaded nmethods.
  RelocIterator iter(this, oops_reloc_begin());
  bool is_in_static_stub = false;
  while (iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first clear ICs where the cached metadata
        // is referring to an unloaded klass or method.
        if (!clean_ic_if_metadata_is_dead(CompiledIC_at(&iter))) {
          return false;
        }
      }

      if (!clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all)) {
        return false;
      }
      break;

    case relocInfo::opt_virtual_call_type:
      if (!clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all)) {
        return false;
      }
      break;

    case relocInfo::static_call_type:
      if (!clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, clean_all)) {
        return false;
      }
      break;

    case relocInfo::static_stub_type: {
      is_in_static_stub = true;
      break;
    }

    case relocInfo::metadata_type: {
      // Only the metadata relocations contained in static/opt virtual call stubs
      // contain the Method* passed to c2i adapters. It is the only metadata
      // relocation that needs to be walked, as it is the one metadata relocation
      // that violates the invariant that all metadata relocations have an oop
      // in the compiled method (due to deferred resolution and code patching).

      // This causes dead metadata to remain in compiled methods that are not unloading.
      // Unless these slippery metadata relocations of the static stubs are at least
      // cleared, subsequent class redefinition operations will access potentially
      // freed memory, and JavaThread execution concurrent to class unloading may
      // call c2i adapters with dead methods.
      if (!is_in_static_stub) {
        // The first metadata relocation after a static stub relocation is the
        // metadata relocation of the static stub used to pass the Method* to
        // c2i adapters.
        continue;
      }
      is_in_static_stub = false;
      if (is_unloading()) {
        // If the nmethod itself is dying, then it may point at dead metadata.
        // Nobody should follow that metadata; it is strictly unsafe.
        continue;
      }
      metadata_Relocation* r = iter.metadata_reloc();
      Metadata* md = r->metadata_value();
      if (md != NULL && md->is_method()) {
        Method* method = static_cast<Method*>(md);
        if (!method->method_holder()->is_loader_alive()) {
          Atomic::store(r->metadata_addr(), (Method*)NULL);

          if (!r->metadata_is_immediate()) {
            r->fix_metadata_relocation();
          }
        }
      }
      break;
    }

    default:
      break;
    }
  }

  return true;
}

// Iterating over all nmethods, e.g. with the help of CodeCache::nmethods_do(fun), was found
// to not be inherently safe. There is a chance that fields are seen which are not properly
// initialized. This happens despite the fact that nmethods_do() asserts the CodeCache_lock
// to be held.
// To bundle knowledge about necessary checks in one place, this function was introduced.
// It is not claimed that these checks are sufficient, but they were found to be necessary.
bool CompiledMethod::nmethod_access_is_safe(nmethod* nm) {
  Method* method = (nm == NULL) ? NULL : nm->method();  // nm->method() may be uninitialized, i.e. != NULL, but invalid
  return (nm != NULL) && (method != NULL) && (method->signature() != NULL) &&
         !nm->is_zombie() && !nm->is_not_installed() &&
         os::is_readable_pointer(method) &&
         os::is_readable_pointer(method->constants()) &&
         os::is_readable_pointer(method->signature());
}
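// Maps the pc of an implicit exception (null check or division by zero) to the
// continuation address recorded in this method's ImplicitExceptionTable.
// Returns NULL when no entry exists, letting the caller fall back to normal
// error reporting; under JVMCI, an entry that maps the pc to itself requests
// an uncommon trap deoptimization instead of a direct continuation.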
address CompiledMethod::continuation_for_implicit_exception(address pc, bool for_div0_check) {
  // Exception happened outside inline-cache check code => we are inside
  // an active nmethod => use cpc to determine a return address
  int exception_offset = pc - code_begin();
  int cont_offset = ImplicitExceptionTable(this).continuation_offset(exception_offset);
#ifdef ASSERT
  if (cont_offset == 0) {
    Thread* thread = Thread::current();
    ResetNoHandleMark rnm; // Might be called from LEAF/QUICK ENTRY
    HandleMark hm(thread);
    ResourceMark rm(thread);
    CodeBlob* cb = CodeCache::find_blob(pc);
    assert(cb != NULL && cb == this, "");
    ttyLocker ttyl;
    tty->print_cr("implicit exception happened at " INTPTR_FORMAT, p2i(pc));
    print();
    method()->print_codes();
    print_code();
    print_pcs();
  }
#endif
  if (cont_offset == 0) {
    // Let the normal error handling report the exception
    return NULL;
  }
  if (cont_offset == exception_offset) {
#if INCLUDE_JVMCI
    Deoptimization::DeoptReason deopt_reason = for_div0_check ? Deoptimization::Reason_div0_check
                                                              : Deoptimization::Reason_null_check;
    JavaThread *thread = JavaThread::current();
    thread->set_jvmci_implicit_exception_pc(pc);
    thread->set_pending_deoptimization(Deoptimization::make_trap_request(deopt_reason,
                                                                         Deoptimization::Action_reinterpret));
    return (SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
#else
    ShouldNotReachHere();
#endif
  }
  return code_begin() + cont_offset;
}

class HasEvolDependency : public MetadataClosure {
  bool _has_evol_dependency;
 public:
  HasEvolDependency() : _has_evol_dependency(false) {}
  void do_metadata(Metadata* md) {
    if (md->is_method()) {
      Method* method = (Method*)md;
      if (method->is_old()) {
        _has_evol_dependency = true;
      }
    }
  }
  bool has_evol_dependency() const { return _has_evol_dependency; }
};

bool CompiledMethod::has_evol_metadata() {
  // Check the metadata in relocIter and CompiledIC, and also deoptimize
  // any nmethod that has references to old methods.
  HasEvolDependency check_evol;
  metadata_do(&check_evol);
  if (check_evol.has_evol_dependency() && log_is_enabled(Debug, redefine, class, nmethod)) {
    ResourceMark rm;
    log_debug(redefine, class, nmethod)
            ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d in nmethod metadata",
             _method->method_holder()->external_name(),
             _method->name()->as_C_string(),
             _method->signature()->as_C_string(),
             compile_id());
  }
  return check_evol.has_evol_dependency();
}