/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/compiledIC.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/scopeDesc.hpp"
#include "code/codeCache.hpp"
#include "interpreter/bytecode.inline.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
    _mark_for_deoptimization_status(not_marked), _method(method) {
  init_defaults();
}

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
    _mark_for_deoptimization_status(not_marked), _method(method) {
  init_defaults();
}

void CompiledMethod::init_defaults() {
  _has_unsafe_access         = 0;
  _has_method_handle_invokes = 0;
  _lazy_critical_native      = 0;
  _has_wide_vectors          = 0;
  _unloading_clock           = 0;
}

bool CompiledMethod::is_method_handle_return(address return_pc) {
  if (!has_method_handle_invokes())  return false;
  PcDesc* pd = pc_desc_at(return_pc);
  if (pd == NULL)
    return false;
  return pd->is_method_handle_invoke();
}

// Returns a string version of the method state.
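// The returned string is a statically allocated literal; callers must not free it.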
const char* CompiledMethod::state() const {
  int state = get_state();
  switch (state) {
  case not_installed:
    return "not installed";
  case in_use:
    return "in use";
  case not_used:
    return "not_used";
  case not_entrant:
    return "not_entrant";
  case zombie:
    return "zombie";
  case unloaded:
    return "unloaded";
  default:
    fatal("unexpected method state: %d", state);
    return NULL;
  }
}

//-----------------------------------------------------------------------------

ExceptionCache* CompiledMethod::exception_cache_acquire() const {
  return OrderAccess::load_acquire(&_exception_cache);
}

void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(), "Must hold the ExceptionCache_lock");
  assert(new_entry != NULL, "Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

  for (;;) {
    ExceptionCache *ec = exception_cache();
    if (ec != NULL) {
      Klass* ex_klass = ec->exception_type();
      if (!ex_klass->is_loader_alive()) {
        // We must guarantee that entries are not inserted with new next pointer
        // edges to ExceptionCache entries with dead klasses, due to bad interactions
        // with concurrent ExceptionCache cleanup. Therefore, the inserts roll
        // the head pointer forward to the first live ExceptionCache, so that the new
        // next pointers always point at live ExceptionCaches that are not removed by
        // concurrent ExceptionCache cleanup.
        ExceptionCache* next = ec->next();
        if (Atomic::cmpxchg(next, &_exception_cache, ec) == ec) {
          CodeCache::release_exception_cache(ec);
        }
        continue;
      }
      ec = exception_cache();
      if (ec != NULL) {
        new_entry->set_next(ec);
      }
    }
    if (Atomic::cmpxchg(new_entry, &_exception_cache, ec) == ec) {
      return;
    }
  }
}

void CompiledMethod::clean_exception_cache() {
  // For each nmethod, only a single thread may call this cleanup function
  // at the same time, whether called in STW cleanup or concurrent cleanup.
  // Note that if the GC is processing exception cache cleaning in a concurrent phase,
  // then a single writer may contend with cleaning up the head pointer to the
  // first ExceptionCache node that has a Klass* that is alive. That is fine,
  // as long as there is no concurrent cleanup of next pointers from concurrent writers.
  // And the concurrent writers do not clean up next pointers, only the head.
  // Also note that concurrent readers will walk through Klass* pointers that are not
  // alive. That does not cause ABA problems, because Klass* is deleted after
  // a handshake with all threads, after all stale ExceptionCaches have been
  // unlinked. That is also when the entries queued on the
  // CodeCache::exception_cache_free_list() are finally freed.
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache_acquire();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    if (!curr->exception_type()->is_loader_alive()) {
      if (prev == NULL) {
        // Try to clean the head; this is contended by concurrent inserts that
        // both lazily clean the head and insert entries at the head. If the
        // CAS fails, the operation is restarted.
        if (Atomic::cmpxchg(next, &_exception_cache, curr) != curr) {
          prev = NULL;
          curr = exception_cache_acquire();
          continue;
        }
      } else {
        // During cleanup it is impossible to connect the next pointer to an
        // ExceptionCache that was not already published before a safepoint
        // prior to the cleanup. Therefore, a releasing store is not required.
        prev->set_next(next);
      }
      // prev stays the same.

      CodeCache::release_exception_cache(curr);
    } else {
      prev = curr;
    }

    curr = next;
  }
}

// These are the public access methods for the exception cache.
address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception, pc)) != NULL) {
      return ret_val;
    }
    ec = ec->next();
  }
  return NULL;
}

void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == NULL || !target_entry->add_address_and_handler(pc, handler)) {
    target_entry = new ExceptionCache(exception, pc, handler);
    add_exception_cache_entry(target_entry);
  }
}

// These methods are private, and used to manipulate the exception cache directly.
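// Returns the cache entry that matches this exception type and still has room
// for another pc/handler pair, or NULL if no such entry exists.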
ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}

//-------------end of code for ExceptionCache--------------

bool CompiledMethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}


bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}

void CompiledMethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values.
  RelocIterator iter(this, NULL, NULL);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}


ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

ScopeDesc* CompiledMethod::scope_desc_near(address pc) {
  PcDesc* pd = pc_desc_near(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

address CompiledMethod::oops_reloc_begin() const {
  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes. If an oop in the old code was there, that oop
  // should not get GC'd. Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (!is_in_use() && is_nmethod()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }
  return low_boundary;
}

int CompiledMethod::verify_icholder_relocations() {
  ResourceMark rm;
  int count = 0;

  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc(), this)) {
        CompiledIC *ic = CompiledIC_at(&iter);
        if (TraceCompiledIC) {
          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
          ic->print();
        }
        assert(ic->cached_icholder() != NULL, "must be non-NULL");
        count++;
      }
    }
  }

  return count;
}

// Method that knows how to preserve outgoing arguments at call. This method must be
// called with a frame corresponding to a Java invoke.
void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
  if (method() != NULL && !method()->is_native()) {
    address pc = fr.pc();
    SimpleScopeDesc ssd(this, pc);
    Bytecode_invoke call(ssd.method(), ssd.bci());
    bool has_receiver = call.has_receiver();
    bool has_appendix = call.has_appendix();
    Symbol* signature = call.signature();

    // The method attached by JIT-compilers should be used, if present.
    // Bytecode can be inaccurate in that case.
    Method* callee = attached_method_before_pc(pc);
    if (callee != NULL) {
      has_receiver = !(callee->access_flags().is_static());
      has_appendix = false;
      signature = callee->signature();
    }

    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
  }
}

Method* CompiledMethod::attached_method(address call_instr) {
  assert(code_contains(call_instr), "not part of the nmethod");
  RelocIterator iter(this, call_instr, call_instr + 1);
  while (iter.next()) {
    if (iter.addr() == call_instr) {
      switch (iter.type()) {
        case relocInfo::static_call_type:      return iter.static_call_reloc()->method_value();
        case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
        case relocInfo::virtual_call_type:     return iter.virtual_call_reloc()->method_value();
        default:                               break;
      }
    }
  }
  return NULL; // not found
}

Method* CompiledMethod::attached_method_before_pc(address pc) {
  if (NativeCall::is_call_before(pc)) {
    NativeCall* ncall = nativeCall_before(pc);
    return attached_method(ncall->instruction_address());
  }
  return NULL; // not a call
}

void CompiledMethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
  if (is_zombie()) {
    return;
  }

  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}

// Clear ICStubs of all compiled ICs.
void CompiledMethod::clear_ic_stubs() {
  assert_locked_or_safepoint(CompiledIC_lock);
  ResourceMark rm;
  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(&iter);
      ic->clear_ic_stub();
    }
  }
}

#ifdef ASSERT
// Check that the class_loader is alive for this bit of metadata.
static void check_class(Metadata* md) {
  Klass* klass = NULL;
  if (md->is_klass()) {
    klass = ((Klass*)md);
  } else if (md->is_method()) {
    klass = ((Method*)md)->method_holder();
  } else if (md->is_methodData()) {
    klass = ((MethodData*)md)->method()->method_holder();
  } else {
    md->print();
    ShouldNotReachHere();
  }
  assert(klass->is_loader_alive(), "must be alive");
}
#endif // ASSERT


void CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {
  if (ic->is_icholder_call()) {
    // The only exception is compiledICHolder metadata which may
    // yet be marked below (we check this further below).
    CompiledICHolder* cichk_metadata = ic->cached_icholder();

    if (cichk_metadata->is_loader_alive()) {
      return;
    }
  } else {
    Metadata* ic_metadata = ic->cached_metadata();
    if (ic_metadata != NULL) {
      if (ic_metadata->is_klass()) {
        if (((Klass*)ic_metadata)->is_loader_alive()) {
          return;
        }
      } else if (ic_metadata->is_method()) {
        Method* method = (Method*)ic_metadata;
        assert(!method->is_old(), "old method should have been cleaned");
        if (method->method_holder()->is_loader_alive()) {
          return;
        }
      } else {
        ShouldNotReachHere();
      }
    }
  }

  ic->set_to_clean();
}

unsigned char CompiledMethod::_global_unloading_clock = 0;

void CompiledMethod::increase_unloading_clock() {
  _global_unloading_clock++;
  if (_global_unloading_clock == 0) {
    // nmethods are allocated with _unloading_clock == 0,
    // so 0 is never used as a clock value.
    _global_unloading_clock = 1;
  }
}

void CompiledMethod::set_unloading_clock(unsigned char unloading_clock) {
  OrderAccess::release_store(&_unloading_clock, unloading_clock);
}

unsigned char CompiledMethod::unloading_clock() {
  return OrderAccess::load_acquire(&_unloading_clock);
}


// static_stub_Relocations may have dangling references to
// nmethods so trim them out here. Otherwise it looks like
// compiled code is maintaining a link to dead metadata.
void CompiledMethod::clean_ic_stubs() {
#ifdef ASSERT
  address low_boundary = oops_reloc_begin();
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    address static_call_addr = NULL;
    if (iter.type() == relocInfo::opt_virtual_call_type) {
      CompiledIC* cic = CompiledIC_at(&iter);
      if (!cic->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    } else if (iter.type() == relocInfo::static_call_type) {
      CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
      if (!csc->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    }
    if (static_call_addr != NULL) {
      RelocIterator sciter(this, low_boundary);
      while (sciter.next()) {
        if (sciter.type() == relocInfo::static_stub_type &&
            sciter.static_stub_reloc()->static_call() == static_call_addr) {
          sciter.static_stub_reloc()->clear_inline_cache();
        }
      }
    }
  }
#endif
}

// This is called at the end of the strong tracing/marking phase of a
// GC to unload an nmethod if it contains otherwise unreachable oops.

void CompiledMethod::do_unloading(BoolObjectClosure* is_alive) {
  // Make sure the oops are ready to receive visitors.
  assert(!is_zombie() && !is_unloaded(),
         "should not call follow on zombie or unloaded nmethod");

  address low_boundary = oops_reloc_begin();

  if (do_unloading_oops(low_boundary, is_alive)) {
    return;
  }

#if INCLUDE_JVMCI
  if (do_unloading_jvmci()) {
    return;
  }
#endif

  // Cleaning of the exception cache and inline caches happens
  // after all the unloaded methods have been found.
}

// Clean references to unloaded nmethods at addr from this one, which is not unloaded.
template <class CompiledICorStaticCall>
static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from,
                                         bool parallel, bool clean_all) {
  // OK to look up references to zombies here.
  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
  CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
  if (nm != NULL) {
    if (parallel && nm->unloading_clock() != CompiledMethod::global_unloading_clock()) {
      // The nmethod has not been processed yet.
      return true;
    }

    // Clean inline caches pointing to both zombie and not_entrant methods.
    if (clean_all || !nm->is_in_use() || (nm->method()->code() != nm)) {
      ic->set_to_clean(from->is_alive());
      assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
    }
  }

  return false;
}

static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from,
                                         bool parallel, bool clean_all = false) {
  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from, parallel, clean_all);
}

static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from,
                                         bool parallel, bool clean_all = false) {
  return clean_if_nmethod_is_unloaded(csc, csc->destination(), from, parallel, clean_all);
}

bool CompiledMethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
  ResourceMark rm;

  // Make sure the oops are ready to receive visitors.
  assert(!is_zombie() && !is_unloaded(),
         "should not call follow on zombie or unloaded nmethod");

  address low_boundary = oops_reloc_begin();

  if (do_unloading_oops(low_boundary, is_alive)) {
    return false;
  }

#if INCLUDE_JVMCI
  if (do_unloading_jvmci()) {
    return false;
  }
#endif

  return unload_nmethod_caches(/*parallel*/true, unloading_occurred);
}

// Cleans caches in nmethods that point to either classes that are unloaded
// or nmethods that are unloaded.
//
// Can be called either in parallel by G1 currently or after all
// nmethods are unloaded. Return postponed=true in the parallel case for
// inline caches found that point to nmethods that are not yet visited during
// the do_unloading walk.
bool CompiledMethod::unload_nmethod_caches(bool parallel, bool unloading_occurred) {

  // The exception cache only needs to be cleaned if unloading occurred.
  if (unloading_occurred) {
    clean_exception_cache();
  }

  bool postponed = cleanup_inline_caches_impl(parallel, unloading_occurred, /*clean_all*/false);

  // All static stubs need to be cleaned.
  clean_ic_stubs();

  // Check that the metadata embedded in the nmethod is alive.
  DEBUG_ONLY(metadata_do(check_class));

  return postponed;
}

// Called to clean up after class unloading for live nmethods and from the sweeper
// for all methods.
bool CompiledMethod::cleanup_inline_caches_impl(bool parallel, bool unloading_occurred, bool clean_all) {
  assert_locked_or_safepoint(CompiledIC_lock);
  bool postponed = false;
  ResourceMark rm;

  // Find all calls in an nmethod and clear the ones that point to non-entrant,
  // zombie and unloaded nmethods.
  RelocIterator iter(this, oops_reloc_begin());
  while (iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first clear ICs where the cached metadata
        // is referring to an unloaded klass or method.
        clean_ic_if_metadata_is_dead(CompiledIC_at(&iter));
      }

      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, parallel, clean_all);
      break;

    case relocInfo::opt_virtual_call_type:
      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, parallel, clean_all);
      break;

    case relocInfo::static_call_type:
      postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, parallel, clean_all);
      break;

    case relocInfo::oop_type:
      // handled by do_unloading_oops already
      break;

    case relocInfo::metadata_type:
      break; // nothing to do.

    default:
      break;
    }
  }

  return postponed;
}

void CompiledMethod::do_unloading_parallel_postponed() {
  ResourceMark rm;

  // Make sure the oops are ready to receive visitors.
  assert(!is_zombie(),
         "should not call follow on zombie nmethod");

  RelocIterator iter(this, oops_reloc_begin());
  while (iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, true);
      break;

    case relocInfo::opt_virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, true);
      break;

    case relocInfo::static_call_type:
      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, true);
      break;

    default:
      break;
    }
  }
}

// Iterating over all nmethods, e.g. with the help of CodeCache::nmethods_do(fun), was found
// not to be inherently safe. There is a chance that fields are seen which are not properly
// initialized. This happens despite the fact that nmethods_do() asserts that the
// CodeCache_lock is held.
// To bundle the knowledge about the necessary checks in one place, this function was introduced.
// It is not claimed that these checks are sufficient, but they were found to be necessary.
bool CompiledMethod::nmethod_access_is_safe(nmethod* nm) {
  Method* method = (nm == NULL) ? NULL : nm->method();  // nm->method() may be uninitialized, i.e. != NULL, but invalid
  return (nm != NULL) && (method != NULL) && (method->signature() != NULL) &&
         !nm->is_zombie() && !nm->is_not_installed() &&
         os::is_readable_pointer(method) &&
         os::is_readable_pointer(method->constants()) &&
         os::is_readable_pointer(method->signature());
}