/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/compiledIC.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/scopeDesc.hpp"
#include "code/codeCache.hpp"
#include "interpreter/bytecode.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
    _method(method), _mark_for_deoptimization_status(not_marked) {
  init_defaults();
}

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
    _method(method), _mark_for_deoptimization_status(not_marked) {
  init_defaults();
}

void CompiledMethod::init_defaults() {
  _has_unsafe_access         = 0;
  _has_method_handle_invokes = 0;
  _lazy_critical_native      = 0;
  _has_wide_vectors          = 0;
  _unloading_clock           = 0;
}

bool CompiledMethod::is_method_handle_return(address return_pc) {
  if (!has_method_handle_invokes())  return false;
  PcDesc* pd = pc_desc_at(return_pc);
  if (pd == NULL)
    return false;
  return pd->is_method_handle_invoke();
}
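// Note (descriptive comment, not from the original file): is_method_handle_return()
// reports whether the PcDesc at the given return address is marked as a MethodHandle
// invoke site; it answers false immediately when the nmethod records no method handle
// invokes. Stack-walking code may use this to apply the special stack-pointer
// treatment such call sites can require when computing the sender frame.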
// Returns a string version of the method state.
const char* CompiledMethod::state() const {
  int state = get_state();
  switch (state) {
  case not_installed:
    return "not installed";
  case in_use:
    return "in use";
  case not_used:
    return "not_used";
  case not_entrant:
    return "not_entrant";
  case zombie:
    return "zombie";
  case unloaded:
    return "unloaded";
  default:
    fatal("unexpected method state: %d", state);
    return NULL;
  }
}

//-----------------------------------------------------------------------------

void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(), "Must hold the ExceptionCache_lock");
  assert(new_entry != NULL, "Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

  ExceptionCache *ec = exception_cache();
  if (ec != NULL) {
    new_entry->set_next(ec);
  }
  release_set_exception_cache(new_entry);
}

void CompiledMethod::clean_exception_cache() {
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    Klass* ex_klass = curr->exception_type();
    if (ex_klass != NULL && !ex_klass->is_loader_alive()) {
      if (prev == NULL) {
        set_exception_cache(next);
      } else {
        prev->set_next(next);
      }
      delete curr;
      // prev stays the same.
    } else {
      prev = curr;
    }

    curr = next;
  }
}

// These are the public methods for accessing the exception cache.
address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception, pc)) != NULL) {
      return ret_val;
    }
    ec = ec->next();
  }
  return NULL;
}

void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == NULL || !target_entry->add_address_and_handler(pc, handler)) {
    target_entry = new ExceptionCache(exception, pc, handler);
    add_exception_cache_entry(target_entry);
  }
}
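// Illustrative usage sketch (not from the original file; the caller and the
// handler computation are hypothetical): a runtime caller resolving an
// exception handler would first try the lock-free lookup and, on a miss,
// compute the handler and record it for subsequent throws at the same pc:
//
//   address handler = cm->handler_for_exception_and_pc(exception, pc);
//   if (handler == NULL) {
//     handler = compute_handler(cm, exception, pc); // hypothetical helper
//     cm->add_handler_for_exception_and_pc(exception, pc, handler);
//   }
//
// Reads take no lock, so a concurrent insert can be missed (a false negative);
// inserts take ExceptionCache_lock and re-check existing entries before adding.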

//-------------end of code for ExceptionCache--------------

// These methods are private, and are used to manipulate the exception cache
// directly.
ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}

bool CompiledMethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}


bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}

void CompiledMethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
  RelocIterator iter(this, NULL, NULL);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}


ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

ScopeDesc* CompiledMethod::scope_desc_near(address pc) {
  PcDesc* pd = pc_desc_near(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

void CompiledMethod::cleanup_inline_caches(bool clean_all/*=false*/) {
  assert_locked_or_safepoint(CompiledIC_lock);

  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes. If an oop in the old code was there, that oop
  // should not get GC'd. Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (!is_in_use() && is_nmethod()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }

  // Find all calls in an nmethod and clear the ones that point to non-entrant,
  // zombie and unloaded nmethods.
  ResourceMark rm;
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    switch (iter.type()) {
      case relocInfo::virtual_call_type:
      case relocInfo::opt_virtual_call_type: {
        CompiledIC *ic = CompiledIC_at(&iter);
        // OK to look up references to zombies here
        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
        if (cb != NULL && cb->is_compiled()) {
          CompiledMethod* nm = cb->as_compiled_method();
          // Clean inline caches pointing to zombie, non-entrant and unloaded methods
          if (clean_all || !nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean(is_alive());
        }
        break;
      }
      case relocInfo::static_call_type: {
        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
        if (cb != NULL && cb->is_compiled()) {
          CompiledMethod* cm = cb->as_compiled_method();
          // Clean inline caches pointing to zombie, non-entrant and unloaded methods
          if (clean_all || !cm->is_in_use() || (cm->method()->code() != cm)) {
            csc->set_to_clean();
          }
        }
        break;
      }
      default:
        break;
    }
  }
}

int CompiledMethod::verify_icholder_relocations() {
  ResourceMark rm;
  int count = 0;

  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc(), this)) {
        CompiledIC *ic = CompiledIC_at(&iter);
        if (TraceCompiledIC) {
          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
          ic->print();
        }
        assert(ic->cached_icholder() != NULL, "must be non-NULL");
        count++;
      }
    }
  }

  return count;
}

// Method that knows how to preserve outgoing arguments at call. This method must be
// called with a frame corresponding to a Java invoke.
void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
  if (method() != NULL && !method()->is_native()) {
    address pc = fr.pc();
    SimpleScopeDesc ssd(this, pc);
    Bytecode_invoke call(ssd.method(), ssd.bci());
    bool has_receiver = call.has_receiver();
    bool has_appendix = call.has_appendix();
    Symbol* signature = call.signature();

    // The method attached by JIT-compilers should be used, if present.
    // Bytecode can be inaccurate in such case.
    Method* callee = attached_method_before_pc(pc);
    if (callee != NULL) {
      has_receiver = !(callee->access_flags().is_static());
      has_appendix = false;
      signature = callee->signature();
    }

    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
  }
}

Method* CompiledMethod::attached_method(address call_instr) {
  assert(code_contains(call_instr), "not part of the nmethod");
  RelocIterator iter(this, call_instr, call_instr + 1);
  while (iter.next()) {
    if (iter.addr() == call_instr) {
      switch (iter.type()) {
        case relocInfo::static_call_type:      return iter.static_call_reloc()->method_value();
        case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
        case relocInfo::virtual_call_type:     return iter.virtual_call_reloc()->method_value();
        default:                               break;
      }
    }
  }
  return NULL; // not found
}

Method* CompiledMethod::attached_method_before_pc(address pc) {
  if (NativeCall::is_call_before(pc)) {
    NativeCall* ncall = nativeCall_before(pc);
    return attached_method(ncall->instruction_address());
  }
  return NULL; // not a call
}

void CompiledMethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
  if (is_zombie()) {
    return;
  }

  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}

// Clear ICStubs of all compiled ICs
void CompiledMethod::clear_ic_stubs() {
  assert_locked_or_safepoint(CompiledIC_lock);
  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(&iter);
      ic->clear_ic_stub();
    }
  }
}

#ifdef ASSERT
// Check that the class_loader is alive for this bit of metadata.
static void check_class(Metadata* md) {
  Klass* klass = NULL;
  if (md->is_klass()) {
    klass = ((Klass*)md);
  } else if (md->is_method()) {
    klass = ((Method*)md)->method_holder();
  } else if (md->is_methodData()) {
    klass = ((MethodData*)md)->method()->method_holder();
  } else {
    md->print();
    ShouldNotReachHere();
  }
  assert(klass->is_loader_alive(), "must be alive");
}
#endif // ASSERT


void CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {
  if (ic->is_icholder_call()) {
    // The only exception is compiledICHolder metadata, which may
    // yet be marked below. (We check this further below.)
    CompiledICHolder* cichk_metadata = ic->cached_icholder();

    if (cichk_metadata->is_loader_alive()) {
      return;
    }
  } else {
    Metadata* ic_metadata = ic->cached_metadata();
    if (ic_metadata != NULL) {
      if (ic_metadata->is_klass()) {
        if (((Klass*)ic_metadata)->is_loader_alive()) {
          return;
        }
      } else if (ic_metadata->is_method()) {
        Method* method = (Method*)ic_metadata;
        assert(!method->is_old(), "old method should have been cleaned");
        if (method->method_holder()->is_loader_alive()) {
          return;
        }
      } else {
        ShouldNotReachHere();
      }
    }
  }

  ic->set_to_clean();
}

unsigned char CompiledMethod::_global_unloading_clock = 0;
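
// Note on the unloading clock (descriptive comment, not from the original file):
// nmethods are allocated with _unloading_clock == 0 and the global clock skips 0
// when it wraps, so 0 always means "never processed". During parallel unloading,
// an nmethod whose local clock does not match _global_unloading_clock has not yet
// been visited in the current cycle, which is why clean_if_nmethod_is_unloaded()
// below postpones cleaning call sites that point at such an nmethod.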

void CompiledMethod::increase_unloading_clock() {
  _global_unloading_clock++;
  if (_global_unloading_clock == 0) {
    // _nmethods are allocated with _unloading_clock == 0,
    // so 0 is never used as a clock value.
    _global_unloading_clock = 1;
  }
}

void CompiledMethod::set_unloading_clock(unsigned char unloading_clock) {
  OrderAccess::release_store(&_unloading_clock, unloading_clock);
}

unsigned char CompiledMethod::unloading_clock() {
  return OrderAccess::load_acquire(&_unloading_clock);
}

// Processing of oop references should have been sufficient to keep
// all strong references alive. Any weak references should have been
// cleared as well. Visit all the metadata and ensure that it's
// really alive.
void CompiledMethod::verify_metadata_loaders(address low_boundary) {
#ifdef ASSERT
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    // static_stub_Relocations may have dangling references to
    // Method*s so trim them out here. Otherwise it looks like
    // compiled code is maintaining a link to dead metadata.
    address static_call_addr = NULL;
    if (iter.type() == relocInfo::opt_virtual_call_type) {
      CompiledIC* cic = CompiledIC_at(&iter);
      if (!cic->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    } else if (iter.type() == relocInfo::static_call_type) {
      CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
      if (!csc->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    }
    if (static_call_addr != NULL) {
      RelocIterator sciter(this, low_boundary);
      while (sciter.next()) {
        if (sciter.type() == relocInfo::static_stub_type &&
            sciter.static_stub_reloc()->static_call() == static_call_addr) {
          sciter.static_stub_reloc()->clear_inline_cache();
        }
      }
    }
  }
  // Check that the metadata embedded in the nmethod is alive
  metadata_do(check_class);
#endif
}

// This is called at the end of the strong tracing/marking phase of a
// GC to unload an nmethod if it contains otherwise unreachable
// oops.

void CompiledMethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  // Make sure the oop's ready to receive visitors
  assert(!is_zombie() && !is_unloaded(),
         "should not call follow on zombie or unloaded nmethod");

  // If the method is not entrant then a JMP is plastered over the
  // first few bytes. If an oop in the old code was there, that oop
  // should not get GC'd. Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }

  // Exception cache
  clean_exception_cache();

  // If class unloading occurred we first iterate over all inline caches and
  // clear ICs where the cached oop is referring to an unloaded klass or method.
  // The remaining live cached oops will be traversed in the relocInfo::oop_type
  // iteration below.
  if (unloading_occurred) {
    RelocIterator iter(this, low_boundary);
    while (iter.next()) {
      if (iter.type() == relocInfo::virtual_call_type) {
        CompiledIC *ic = CompiledIC_at(&iter);
        clean_ic_if_metadata_is_dead(ic);
      }
    }
  }

  if (do_unloading_oops(low_boundary, is_alive, unloading_occurred)) {
    return;
  }

#if INCLUDE_JVMCI
  if (do_unloading_jvmci(unloading_occurred)) {
    return;
  }
#endif

  // Ensure that all metadata is still alive
  verify_metadata_loaders(low_boundary);
}

template <class CompiledICorStaticCall>
static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from) {
  // OK to look up references to zombies here
  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
  CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
  if (nm != NULL) {
    if (nm->unloading_clock() != CompiledMethod::global_unloading_clock()) {
      // The nmethod has not been processed yet.
      return true;
    }

    // Clean inline caches pointing to both zombie and not_entrant methods
    if (!nm->is_in_use() || (nm->method()->code() != nm)) {
      ic->set_to_clean();
      assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
    }
  }

  return false;
}

static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from) {
  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from);
}

static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from) {
  return clean_if_nmethod_is_unloaded(csc, csc->destination(), from);
}

bool CompiledMethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
  ResourceMark rm;

  // Make sure the oop's ready to receive visitors
  assert(!is_zombie() && !is_unloaded(),
         "should not call follow on zombie or unloaded nmethod");

  // If the method is not entrant then a JMP is plastered over the
  // first few bytes. If an oop in the old code was there, that oop
  // should not get GC'd. Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }

  // Exception cache
  clean_exception_cache();

  bool postponed = false;

  RelocIterator iter(this, low_boundary);
  while (iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first iterate over all inline caches and
        // clear ICs where the cached oop is referring to an unloaded klass or method.
        clean_ic_if_metadata_is_dead(CompiledIC_at(&iter));
      }

      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this);
      break;

    case relocInfo::opt_virtual_call_type:
      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this);
      break;

    case relocInfo::static_call_type:
      postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this);
      break;

    case relocInfo::oop_type:
      // handled by do_unloading_oops below
      break;

    case relocInfo::metadata_type:
      break; // nothing to do.

    default:
      break;
    }
  }

  if (do_unloading_oops(low_boundary, is_alive, unloading_occurred)) {
    return postponed;
  }

#if INCLUDE_JVMCI
  if (do_unloading_jvmci(unloading_occurred)) {
    return postponed;
  }
#endif

  // Ensure that all metadata is still alive
  verify_metadata_loaders(low_boundary);

  return postponed;
}

void CompiledMethod::do_unloading_parallel_postponed() {
  ResourceMark rm;

  // Make sure the oop's ready to receive visitors
  assert(!is_zombie(),
         "should not call follow on zombie nmethod");

  // If the method is not entrant then a JMP is plastered over the
  // first few bytes. If an oop in the old code was there, that oop
  // should not get GC'd. Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }

  RelocIterator iter(this, low_boundary);
  while (iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this);
      break;

    case relocInfo::opt_virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this);
      break;

    case relocInfo::static_call_type:
      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this);
      break;

    default:
      break;
    }
  }
}
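
// Summary note (descriptive comment, not part of the original file): during
// parallel class unloading, do_unloading_parallel() is run over each nmethod
// and returns true when some call site could not be cleaned yet because its
// target nmethod's unloading clock was stale (i.e. that target had not been
// processed in the current cycle). Those call sites are revisited by
// do_unloading_parallel_postponed() after the first pass completes, so the
// postponed inline caches and static calls can finally be cleaned.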