/*
 * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/compiledIC.hpp"
#include "code/scopeDesc.hpp"
#include "code/codeCache.hpp"
#include "prims/methodHandles.hpp"
#include "interpreter/bytecode.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/sharedRuntime.hpp"

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
    _method(method), _mark_for_deoptimization_status(not_marked) {
  init_defaults();
}

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
    _method(method), _mark_for_deoptimization_status(not_marked) {
  init_defaults();
}

void CompiledMethod::init_defaults() {
  _has_unsafe_access         = 0;
  _has_method_handle_invokes = 0;
  _lazy_critical_native      = 0;
  _has_wide_vectors          = 0;
  _unloading_clock           = 0;
}

bool CompiledMethod::is_method_handle_return(address return_pc) {
  if (!has_method_handle_invokes())  return false;
  PcDesc* pd = pc_desc_at(return_pc);
  if (pd == NULL)
    return false;
  return pd->is_method_handle_invoke();
}

// When using JVMCI the address might be off by the size of a call instruction.
bool CompiledMethod::is_deopt_entry(address pc) {
  return pc == deopt_handler_begin()
#if INCLUDE_JVMCI
    || (is_compiled_by_jvmci() && pc == (deopt_handler_begin() + NativeCall::instruction_size))
#endif
    ;
}

// Returns a string version of the method state.
const char* CompiledMethod::state() const {
  int state = get_state();
  switch (state) {
  case in_use:
    return "in use";
  case not_used:
    return "not_used";
  case not_entrant:
    return "not_entrant";
  case zombie:
    return "zombie";
  case unloaded:
    return "unloaded";
  default:
    fatal("unexpected method state: %d", state);
    return NULL;
  }
}

//-----------------------------------------------------------------------------

void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(), "Must hold the ExceptionCache_lock");
  assert(new_entry != NULL, "Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

  ExceptionCache *ec = exception_cache();
  if (ec != NULL) {
    new_entry->set_next(ec);
  }
  release_set_exception_cache(new_entry);
}

void CompiledMethod::clean_exception_cache(BoolObjectClosure* is_alive) {
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    Klass* ex_klass = curr->exception_type();
    if (ex_klass != NULL && !ex_klass->is_loader_alive(is_alive)) {
      if (prev == NULL) {
        set_exception_cache(next);
      } else {
        prev->set_next(next);
      }
      delete curr;
      // prev stays the same.
    } else {
      prev = curr;
    }

    curr = next;
  }
}

// These are the public access methods for the exception cache.
address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception, pc)) != NULL) {
      return ret_val;
    }
    ec = ec->next();
  }
  return NULL;
}

void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == NULL || !target_entry->add_address_and_handler(pc, handler)) {
    target_entry = new ExceptionCache(exception, pc, handler);
    add_exception_cache_entry(target_entry);
  }
}
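
// Note on the concurrency pattern used above: reads of the exception cache
// are lock-free, while writers serialize on ExceptionCache_lock and publish
// a new head entry with a release store. A typical caller therefore probes
// first and falls back to the locked insert on a miss, roughly like this
// (illustrative sketch only; the slow-path lookup is elided):
//
//   address handler = cm->handler_for_exception_and_pc(exception, pc);
//   if (handler == NULL) {
//     handler = ...;  // slow path: compute the handler for this (exception, pc)
//     cm->add_handler_for_exception_and_pc(exception, pc, handler);
//   }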

//-------------end of code for ExceptionCache--------------

// These methods are private, and are used to manipulate the exception
// cache directly.
ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}

bool CompiledMethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}


bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}

void CompiledMethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
  RelocIterator iter(this, NULL, NULL);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}


ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop(), pd->return_vt());
}
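
// Illustrative sketch: a ScopeDesc describes the innermost scope at a pc, and
// callers can walk outward through the inlining chain via ScopeDesc::sender()
// (NULL at the outermost scope). The helper below is hypothetical and only
// shows the traversal; callers need a ResourceMark since ScopeDescs are
// resource-allocated.
//
//   void print_inlining_chain(CompiledMethod* cm, address pc) {
//     ResourceMark rm;
//     for (ScopeDesc* sd = cm->scope_desc_at(pc); sd != NULL; sd = sd->sender()) {
//       tty->print_cr("%s @ bci %d", sd->method()->name_and_sig_as_C_string(), sd->bci());
//     }
//   }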

void CompiledMethod::cleanup_inline_caches(bool clean_all/*=false*/) {
  assert_locked_or_safepoint(CompiledIC_lock);

  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes. If an oop in the old code was there, that oop
  // should not get GC'd. Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (!is_in_use() && is_nmethod()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }

  // Find all calls in an nmethod and clear the ones that point to non-entrant,
  // zombie and unloaded nmethods.
  ResourceMark rm;
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    switch (iter.type()) {
      case relocInfo::virtual_call_type:
      case relocInfo::opt_virtual_call_type: {
        CompiledIC *ic = CompiledIC_at(&iter);
        // Ok to lookup references to zombies here
        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
        if (cb != NULL && cb->is_compiled()) {
          CompiledMethod* nm = cb->as_compiled_method();
          // Clean inline caches pointing to zombie, non-entrant and unloaded methods
          if (clean_all || !nm->is_in_use() || (nm->method()->code() != nm)) {
            ic->set_to_clean(is_alive());
          }
        }
        break;
      }
      case relocInfo::static_call_type: {
        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
        if (cb != NULL && cb->is_compiled()) {
          CompiledMethod* cm = cb->as_compiled_method();
          // Clean inline caches pointing to zombie, non-entrant and unloaded methods
          if (clean_all || !cm->is_in_use() || (cm->method()->code() != cm)) {
            csc->set_to_clean();
          }
        }
        break;
      }
    }
  }
}
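
// The cleaning condition used above treats a call target as stale when the
// target nmethod is no longer in use (not_entrant, zombie or unloaded) or is
// no longer the current code for its Method. Spelled out as a hypothetical
// helper (sketch only, not used by this file):
//
//   static bool is_stale_target(CompiledMethod* target) {
//     return !target->is_in_use()                  // not_entrant, zombie or unloaded
//         || target->method()->code() != target;   // superseded by newer code
//   }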

int CompiledMethod::verify_icholder_relocations() {
  ResourceMark rm;
  int count = 0;

  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc(), this)) {
        CompiledIC *ic = CompiledIC_at(&iter);
        if (TraceCompiledIC) {
          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
          ic->print();
        }
        assert(ic->cached_icholder() != NULL, "must be non-NULL");
        count++;
      }
    }
  }

  return count;
}

// Method that knows how to preserve outgoing arguments at call. This method must be
// called with a frame corresponding to a Java invoke.
void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
#ifndef SHARK
  if (method() != NULL && !method()->is_native()) {
    address pc = fr.pc();
    SimpleScopeDesc ssd(this, pc);
    Bytecode_invoke call(ssd.method(), ssd.bci());
    bool has_receiver = call.has_receiver();
    bool has_appendix = call.has_appendix();
    Symbol* signature = call.signature();

    // The method attached by JIT-compilers should be used, if present.
    // Bytecode can be inaccurate in such a case.
    Method* callee = attached_method_before_pc(pc);
    if (callee != NULL) {
      has_receiver = !(callee->access_flags().is_static());
      has_appendix = false;
      signature = callee->signature();
    }

    // If value types are passed as fields, use the extended signature
    // which contains the types of all (oop) fields of the value type.
    if (ValueTypePassFieldsAsArgs) {
      // Check if receiver or one of the arguments is a value type
      bool has_value_receiver = (callee != NULL && callee->method_holder()->is_value());
      bool has_value_argument = has_value_receiver;
      const int len = signature->utf8_length();
      for (int i = 0; i < len && !has_value_argument; ++i) {
        if (signature->byte_at(i) == 'Q') {
          has_value_argument = true;
        }
      }
      if (has_value_argument) {
        // Get the extended signature from the callee's adapter through the attached method
        assert(callee != NULL, "must have attached method");
        signature = callee->adapter()->get_sig_extended();
        assert(signature != NULL, "signature is null");
        has_receiver = false; // The extended signature contains the receiver type
      }
    }

    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
  }
#endif // !SHARK
}
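
// Worked example for the signature-driven scan above (illustrative only):
// for a virtual call with signature "(Ljava/lang/String;I)V", the closure f
// is applied to the receiver slot and to the String argument slot, while the
// int argument contributes no oop. If has_appendix is set (method handle or
// invokedynamic call sites), the trailing appendix oop is visited as well.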

// -----------------------------------------------------------------------------
// CompiledMethod::get_deopt_original_pc
//
// Return the original PC for the given PC if:
// (a) the given PC belongs to an nmethod and
// (b) it is a deopt PC
address CompiledMethod::get_deopt_original_pc(const frame* fr) {
  if (fr->cb() == NULL)  return NULL;

  CompiledMethod* cm = fr->cb()->as_compiled_method_or_null();
  if (cm != NULL && cm->is_deopt_pc(fr->pc()))
    return cm->get_original_pc(fr);

  return NULL;
}

Method* CompiledMethod::attached_method(address call_instr) {
  assert(code_contains(call_instr), "not part of the nmethod");
  RelocIterator iter(this, call_instr, call_instr + 1);
  while (iter.next()) {
    if (iter.addr() == call_instr) {
      switch (iter.type()) {
        case relocInfo::static_call_type:      return iter.static_call_reloc()->method_value();
        case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
        case relocInfo::virtual_call_type:     return iter.virtual_call_reloc()->method_value();
      }
    }
  }
  return NULL; // not found
}

Method* CompiledMethod::attached_method_before_pc(address pc) {
  if (NativeCall::is_call_before(pc)) {
    NativeCall* ncall = nativeCall_before(pc);
    return attached_method(ncall->instruction_address());
  }
  return NULL; // not a call
}

void CompiledMethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
  if (is_zombie()) {
    return;
  }

  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}

// Clear ICStubs of all compiled ICs
void CompiledMethod::clear_ic_stubs() {
  assert_locked_or_safepoint(CompiledIC_lock);
  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(&iter);
      ic->clear_ic_stub();
    }
  }
}

#ifdef ASSERT

class CheckClass : AllStatic {
  static BoolObjectClosure* _is_alive;

  // Check class_loader is alive for this bit of metadata.
  static void check_class(Metadata* md) {
    Klass* klass = NULL;
    if (md->is_klass()) {
      klass = ((Klass*)md);
    } else if (md->is_method()) {
      klass = ((Method*)md)->method_holder();
    } else if (md->is_methodData()) {
      klass = ((MethodData*)md)->method()->method_holder();
    } else {
      md->print();
      ShouldNotReachHere();
    }
    assert(klass->is_loader_alive(_is_alive), "must be alive");
  }
 public:
  static void do_check_class(BoolObjectClosure* is_alive, CompiledMethod* nm) {
    assert(SafepointSynchronize::is_at_safepoint(), "this is only ok at safepoint");
    _is_alive = is_alive;
    nm->metadata_do(check_class);
  }
};

// This is called during a safepoint so can use static data
BoolObjectClosure* CheckClass::_is_alive = NULL;
#endif // ASSERT


void CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive) {
  if (ic->is_icholder_call()) {
    // The only exception is compiledICHolder oops which may
    // yet be marked below. (We check this further below).
    CompiledICHolder* cichk_oop = ic->cached_icholder();

    if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
        cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
      return;
    }
  } else {
    Metadata* ic_oop = ic->cached_metadata();
    if (ic_oop != NULL) {
      if (ic_oop->is_klass()) {
        if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
          return;
        }
      } else if (ic_oop->is_method()) {
        if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
          return;
        }
      } else {
        ShouldNotReachHere();
      }
    }
  }

  ic->set_to_clean();
}

unsigned char CompiledMethod::_global_unloading_clock = 0;

void CompiledMethod::increase_unloading_clock() {
  _global_unloading_clock++;
  if (_global_unloading_clock == 0) {
    // _nmethods are allocated with _unloading_clock == 0,
    // so 0 is never used as a clock value.
    _global_unloading_clock = 1;
  }
}

void CompiledMethod::set_unloading_clock(unsigned char unloading_clock) {
  OrderAccess::release_store((volatile jubyte*)&_unloading_clock, unloading_clock);
}

unsigned char CompiledMethod::unloading_clock() {
  return (unsigned char)OrderAccess::load_acquire((volatile jubyte*)&_unloading_clock);
}
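
// Sketch of the unloading-clock protocol (illustrative; the driving code
// lives in the GC): the global clock is bumped once per unloading pass, and
// each nmethod is stamped as it gets processed. A stamp that differs from
// the global clock therefore means "not processed yet in this pass", which
// is what clean_if_nmethod_is_unloaded() below relies on to postpone work.
//
//   CompiledMethod::increase_unloading_clock();  // start of an unloading pass
//   // for each nmethod cm processed by a worker:
//   //   cm->do_unloading_parallel(is_alive, unloading_occurred);
//   //   cm->set_unloading_clock(CompiledMethod::global_unloading_clock());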

// Processing of oop references should have been sufficient to keep
// all strong references alive. Any weak references should have been
// cleared as well. Visit all the metadata and ensure that it's
// really alive.
void CompiledMethod::verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive) {
#ifdef ASSERT
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    // static_stub_Relocations may have dangling references to
    // Method*s so trim them out here. Otherwise it looks like
    // compiled code is maintaining a link to dead metadata.
    address static_call_addr = NULL;
    if (iter.type() == relocInfo::opt_virtual_call_type) {
      CompiledIC* cic = CompiledIC_at(&iter);
      if (!cic->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    } else if (iter.type() == relocInfo::static_call_type) {
      CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
      if (!csc->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    }
    if (static_call_addr != NULL) {
      RelocIterator sciter(this, low_boundary);
      while (sciter.next()) {
        if (sciter.type() == relocInfo::static_stub_type &&
            sciter.static_stub_reloc()->static_call() == static_call_addr) {
          sciter.static_stub_reloc()->clear_inline_cache();
        }
      }
    }
  }
  // Check that the metadata embedded in the nmethod is alive
  CheckClass::do_check_class(is_alive, this);
#endif
}

// This is called at the end of the strong tracing/marking phase of a
// GC to unload an nmethod if it contains otherwise unreachable
// oops.

void CompiledMethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  // Make sure the oop's ready to receive visitors
  assert(!is_zombie() && !is_unloaded(),
         "should not call follow on zombie or unloaded nmethod");

  // If the method is not entrant then a JMP is plastered over the
  // first few bytes. If an oop in the old code was there, that oop
  // should not get GC'd. Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }

  // The RedefineClasses() API can cause the class unloading invariant
  // to no longer be true. See jvmtiExport.hpp for details.
  // Also, leave a debugging breadcrumb in local flag.
  if (JvmtiExport::has_redefined_a_class()) {
    // This set of the unloading_occurred flag is done before the
    // call to post_compiled_method_unload() so that the unloading
    // of this nmethod is reported.
    unloading_occurred = true;
  }

  // Exception cache
  clean_exception_cache(is_alive);

  // If class unloading occurred we first iterate over all inline caches and
  // clear ICs where the cached oop is referring to an unloaded klass or method.
  // The remaining live cached oops will be traversed in the relocInfo::oop_type
  // iteration below.
  if (unloading_occurred) {
    RelocIterator iter(this, low_boundary);
    while (iter.next()) {
      if (iter.type() == relocInfo::virtual_call_type) {
        CompiledIC *ic = CompiledIC_at(&iter);
        clean_ic_if_metadata_is_dead(ic, is_alive);
      }
    }
  }

  if (do_unloading_oops(low_boundary, is_alive, unloading_occurred)) {
    return;
  }

#if INCLUDE_JVMCI
  if (do_unloading_jvmci(is_alive, unloading_occurred)) {
    return;
  }
#endif

  // Ensure that all metadata is still alive
  verify_metadata_loaders(low_boundary, is_alive);
}

template <class CompiledICorStaticCall>
static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, BoolObjectClosure *is_alive, CompiledMethod* from) {
  // Ok to lookup references to zombies here
  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
  CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
  if (nm != NULL) {
    if (nm->unloading_clock() != CompiledMethod::global_unloading_clock()) {
      // The nmethod has not been processed yet.
      return true;
    }

    // Clean inline caches pointing to both zombie and not_entrant methods
    if (!nm->is_in_use() || (nm->method()->code() != nm)) {
      ic->set_to_clean();
      assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
    }
  }

  return false;
}

static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, BoolObjectClosure *is_alive, CompiledMethod* from) {
  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), is_alive, from);
}

static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, BoolObjectClosure *is_alive, CompiledMethod* from) {
  return clean_if_nmethod_is_unloaded(csc, csc->destination(), is_alive, from);
}
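
// Note: the boolean returned by these helpers means "postpone", not
// "cleaned". A call site whose target nmethod has not yet been stamped with
// the current unloading clock cannot be cleaned safely in this pass, so the
// caller accumulates the result (postponed |= ...) as in
// do_unloading_parallel() below and revisits such sites in
// do_unloading_parallel_postponed().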

bool CompiledMethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
  ResourceMark rm;

  // Make sure the oop's ready to receive visitors
  assert(!is_zombie() && !is_unloaded(),
         "should not call follow on zombie or unloaded nmethod");

  // If the method is not entrant then a JMP is plastered over the
  // first few bytes. If an oop in the old code was there, that oop
  // should not get GC'd. Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }

  // The RedefineClasses() API can cause the class unloading invariant
  // to no longer be true. See jvmtiExport.hpp for details.
  // Also, leave a debugging breadcrumb in local flag.
  if (JvmtiExport::has_redefined_a_class()) {
    // This set of the unloading_occurred flag is done before the
    // call to post_compiled_method_unload() so that the unloading
    // of this nmethod is reported.
    unloading_occurred = true;
  }

  // Exception cache
  clean_exception_cache(is_alive);

  bool postponed = false;

  RelocIterator iter(this, low_boundary);
  while (iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first iterate over all inline caches and
        // clear ICs where the cached oop is referring to an unloaded klass or method.
        clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive);
      }

      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::opt_virtual_call_type:
      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::static_call_type:
      postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
      break;

    case relocInfo::oop_type:
      // handled by do_unloading_oops below
      break;

    case relocInfo::metadata_type:
      break; // nothing to do.
    }
  }

  if (do_unloading_oops(low_boundary, is_alive, unloading_occurred)) {
    return postponed;
  }

#if INCLUDE_JVMCI
  if (do_unloading_jvmci(is_alive, unloading_occurred)) {
    return postponed;
  }
#endif

  // Ensure that all metadata is still alive
  verify_metadata_loaders(low_boundary, is_alive);

  return postponed;
}

void CompiledMethod::do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred) {
  ResourceMark rm;

  // Make sure the oop's ready to receive visitors
  assert(!is_zombie(),
         "should not call follow on zombie nmethod");

  // If the method is not entrant then a JMP is plastered over the
  // first few bytes. If an oop in the old code was there, that oop
  // should not get GC'd. Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }

  RelocIterator iter(this, low_boundary);
  while (iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::opt_virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::static_call_type:
      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
      break;
    }
  }
}