/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/events.hpp"


// Every time a compiled IC is changed or its type is being accessed,
// either the CompiledIC_lock must be held or we must be at a safepoint.

//-----------------------------------------------------------------------------
// Low-level access to an inline cache. Private, since they might not be
// MT-safe to use.

void* CompiledIC::cached_value() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "an optimized virtual call does not have a cached metadata");

  if (!is_in_transition_state()) {
    void* data = get_data();
    // If we let the metadata value here be initialized to zero...
    assert(data != NULL || Universe::non_oop_word() == NULL,
           "no raw nulls in CompiledIC metadatas, because of patching races");
    return (data == (void*)Universe::non_oop_word()) ? NULL : data;
  } else {
    return InlineCacheBuffer::cached_value_for((CompiledIC *)this);
  }
}


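// Low-level update of the call site: patches the branch target and, for a
// non-optimized IC, the cached metadata word. The caller is responsible for
// choosing an MT-safe transition; the high-level setters further below do so.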
void CompiledIC::internal_set_ic_destination(address entry_point, bool is_icstub, void* cache, bool is_icholder) {
  assert(entry_point != NULL, "must set legal entry point");
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized() || cache == NULL, "an optimized virtual call does not have a cached metadata");
  assert(cache == NULL || cache != (Metadata*)badOopVal, "invalid metadata");

  assert(!is_icholder || is_icholder_entry(entry_point), "must be");

  // Don't use ic_destination for this test since that forwards
  // through the ICBuffer instead of returning the actual current state of
  // the CompiledIC.
  if (is_icholder_entry(_call->destination())) {
    // When patching for the ICStub case, the cached value isn't
    // overwritten until the ICStub is copied into the CompiledIC during
    // the next safepoint. Make sure that the CompiledICHolder* is
    // marked for release at this point since it won't be identifiable
    // once the entry point is overwritten.
    InlineCacheBuffer::queue_for_release((CompiledICHolder*)get_data());
  }

  if (TraceCompiledIC) {
    tty->print("  ");
    print_compiled_ic();
    tty->print(" changing destination to " INTPTR_FORMAT, p2i(entry_point));
    if (!is_optimized()) {
      tty->print(" changing cached %s to " INTPTR_FORMAT, is_icholder ? "icholder" : "metadata", p2i((address)cache));
    }
    if (is_icstub) {
      tty->print(" (icstub)");
    }
    tty->cr();
  }

  {
    MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob_unsafe(_call->instruction_address());
    assert(cb != NULL && cb->is_compiled(), "must be compiled");
#endif
    _call->set_destination_mt_safe(entry_point);
  }

  if (is_optimized() || is_icstub) {
    // Optimized call sites don't have a cache value and ICStub call
    // sites only change the entry point. Changing the value in that
    // case could lead to MT safety issues.
    assert(cache == NULL, "must be null");
    return;
  }

  if (cache == NULL) cache = (void*)Universe::non_oop_word();

  set_data((intptr_t)cache);
}


void CompiledIC::set_ic_destination(ICStub* stub) {
  internal_set_ic_destination(stub->code_begin(), true, NULL, false);
}


address CompiledIC::ic_destination() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  if (!is_in_transition_state()) {
    return _call->destination();
  } else {
    return InlineCacheBuffer::ic_destination_for((CompiledIC *)this);
  }
}


bool CompiledIC::is_in_transition_state() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  return InlineCacheBuffer::contains(_call->destination());
}


bool CompiledIC::is_icholder_call() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  return !_is_optimized && is_icholder_entry(ic_destination());
}

// Returns the native address of the 'call' instruction in the inline cache. Used by
// the InlineCacheBuffer when it needs to find the stub.
address CompiledIC::stub_address() const {
  assert(is_in_transition_state(), "should only be called when we are in a transition state");
  return _call->destination();
}

// Clears the IC stub if the compiled IC is in transition state.
void CompiledIC::clear_ic_stub() {
  if (is_in_transition_state()) {
    ICStub* stub = ICStub_from_destination_address(stub_address());
    stub->clear();
  }
}

//-----------------------------------------------------------------------------
// High-level access to an inline cache. Guaranteed to be MT-safe.
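//
// An inline cache is in one of three states: clean (pointing at the resolve
// stub), monomorphic (pointing at compiled or interpreted code for a single
// receiver), or megamorphic (pointing at a vtable/itable stub). Transitions
// that cannot be patched in place go through an ICStub in the
// InlineCacheBuffer and are completed at the next safepoint.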

void CompiledIC::initialize_from_iter(RelocIterator* iter) {
  assert(iter->addr() == _call->instruction_address(), "must find ic_call");

  if (iter->type() == relocInfo::virtual_call_type) {
    virtual_call_Relocation* r = iter->virtual_call_reloc();
    _is_optimized = false;
    _value = _call->get_load_instruction(r);
  } else {
    assert(iter->type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
    _is_optimized = true;
    _value = NULL;
  }
}

CompiledIC::CompiledIC(CompiledMethod* cm, NativeCall* call)
  : _method(cm)
{
  _call = _method->call_wrapper_at((address) call);
  address ic_call = _call->instruction_address();

  assert(ic_call != NULL, "ic_call address must be set");
  assert(cm != NULL, "must pass compiled method");
  assert(cm->contains(ic_call), "must be in compiled method");

  // Search for the ic_call at the given address.
  RelocIterator iter(cm, ic_call, ic_call + 1);
  bool ret = iter.next();
  assert(ret == true, "relocInfo must exist at this address");
  assert(iter.addr() == ic_call, "must find ic_call");

  initialize_from_iter(&iter);
}

CompiledIC::CompiledIC(RelocIterator* iter)
  : _method(iter->code())
{
  _call = _method->call_wrapper_at(iter->addr());
  address ic_call = _call->instruction_address();

  CompiledMethod* nm = iter->code();
  assert(ic_call != NULL, "ic_call address must be set");
  assert(nm != NULL, "must pass compiled method");
  assert(nm->contains(ic_call), "must be in compiled method");

  initialize_from_iter(iter);
}

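// Patches this IC to dispatch through a vtable or itable stub. Returns false
// if the required dispatch stub is unavailable, so the caller can bail out
// and retry the resolution later.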
bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
  assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");

  address entry;
  if (call_info->call_kind() == CallInfo::itable_call) {
    assert(bytecode == Bytecodes::_invokeinterface, "");
    int itable_index = call_info->itable_index();
    entry = VtableStubs::find_itable_stub(itable_index);
    if (entry == NULL) {
      return false;
    }
#ifdef ASSERT
    int index = call_info->resolved_method()->itable_index();
    assert(index == itable_index, "CallInfo pre-computes this");
    InstanceKlass* k = call_info->resolved_method()->method_holder();
    assert(k->verify_itable_index(itable_index), "sanity check");
#endif //ASSERT
    CompiledICHolder* holder = new CompiledICHolder(call_info->resolved_method()->method_holder(),
                                                    call_info->resolved_klass(), false);
    holder->claim();
    InlineCacheBuffer::create_transition_stub(this, holder, entry);
  } else {
    assert(call_info->call_kind() == CallInfo::vtable_call, "either itable or vtable");
    // Can be different than selected_method->vtable_index(), due to package-private etc.
    int vtable_index = call_info->vtable_index();
    assert(call_info->resolved_klass()->verify_vtable_index(vtable_index), "sanity check");
    entry = VtableStubs::find_vtable_stub(vtable_index);
    if (entry == NULL) {
      return false;
    }
    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
  }

  if (TraceICs) {
    ResourceMark rm;
    assert(!call_info->selected_method().is_null(), "Unexpected null selected method");
    tty->print_cr("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
                  p2i(instruction_address()), call_info->selected_method()->print_value_string(), p2i(entry));
  }

  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed, so
  // cleaning it immediately is harmless.
  // assert(is_megamorphic(), "sanity check");
  return true;
}


// true if destination is megamorphic stub
bool CompiledIC::is_megamorphic() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "an optimized call cannot be megamorphic");

  // Cannot rely on cached_value. It is either an interface or a method.
  return VtableStubs::entry_point(ic_destination()) != NULL;
}

bool CompiledIC::is_call_to_compiled() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");

  // Use unsafe, since an inline cache might point to a zombie method. However, the zombie
  // method is guaranteed to still exist, since we only remove methods after all inline caches
  // have been cleaned up.
  CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
  bool is_monomorphic = (cb != NULL && cb->is_compiled());
  // Check that the cached_value is a klass for non-optimized monomorphic calls.
  // This assertion is invalid for compiler1: a call that does not look optimized (no static stub) can be used
  // for calling directly to the verified entry point without using the inline cache (i.e., cached_value == NULL).
  // For JVMCI this occurs because CHA is only used to improve inlining, so call sites which could be optimized
  // virtuals because there are no currently loaded subclasses of a type are left as virtual call sites.
#ifdef ASSERT
  CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address());
  bool is_c1_or_jvmci_method = caller->is_compiled_by_c1() || caller->is_compiled_by_jvmci();
  assert(is_c1_or_jvmci_method ||
         !is_monomorphic ||
         is_optimized() ||
         !caller->is_alive() ||
         (cached_metadata() != NULL && cached_metadata()->is_klass()), "sanity check");
#endif // ASSERT
  return is_monomorphic;
}


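// A monomorphic IC can reach interpreted code in two ways: a non-optimized
// call dispatches through a CompiledICHolder and a c2i adapter blob, while an
// optimized call goes through the static call stub in its own code blob.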
bool CompiledIC::is_call_to_interpreted() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // Call to interpreter if destination is either calling to a stub (if it
  // is optimized), or calling to an I2C blob
  bool is_call_to_interpreted = false;
  if (!is_optimized()) {
    // must use unsafe because the destination can be a zombie (and we're cleaning)
    // and the print_compiled_ic code wants to know if the site (in the non-zombie)
    // is to the interpreter.
    CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
    is_call_to_interpreted = (cb != NULL && cb->is_adapter_blob());
    assert(!is_call_to_interpreted || (is_icholder_call() && cached_icholder() != NULL), "sanity check");
  } else {
    // Check if we are calling into our own codeblob (i.e., to a stub)
    address dest = ic_destination();
#ifdef ASSERT
    {
      _call->verify_resolve_call(dest);
    }
#endif /* ASSERT */
    is_call_to_interpreted = _call->is_call_to_interpreted(dest);
  }
  return is_call_to_interpreted;
}

void CompiledIC::set_to_clean(bool in_use) {
  assert(SafepointSynchronize::is_at_safepoint() || CompiledIC_lock->is_locked(), "MT-unsafe call");
  if (TraceInlineCacheClearing || TraceICs) {
    tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", p2i(instruction_address()));
    print();
  }

  address entry = _call->get_resolve_call_stub(is_optimized());

  // A zombie transition will always be safe, since the metadata has already been set to NULL, so
  // we only need to patch the destination.
  bool safe_transition = _call->is_safe_for_patching() || !in_use || is_optimized() || SafepointSynchronize::is_at_safepoint();

  if (safe_transition) {
    // Kill any leftover stub we might have too
    clear_ic_stub();
    if (is_optimized()) {
      set_ic_destination(entry);
    } else {
      set_ic_destination_and_value(entry, (void*)NULL);
    }
  } else {
    // Unsafe transition - create stub.
    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed, so
  // cleaning it immediately is harmless.
  // assert(is_clean(), "sanity check");
}

bool CompiledIC::is_clean() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  bool is_clean = false;
  address dest = ic_destination();
  is_clean = dest == _call->get_resolve_call_stub(is_optimized());
  assert(!is_clean || is_optimized() || cached_value() == NULL, "sanity check");
  return is_clean;
}

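// Patches this IC to call a single resolved target, as described by the
// CompiledICInfo computed in compute_monomorphic_entry() below.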
void CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if a cache entry becomes invalid, we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache.
  //
  // The above is no longer true. SharedRuntime::fixup_callers_callsite will change optimized
  // callsites. In addition, ic_miss code will update a site to monomorphic if it determines
  // that a monomorphic call to the interpreter can now be monomorphic to compiled code.
  //
  // In both of these cases the only thing being modified is the jump/call target and these
  // transitions are mt_safe.

  Thread *thread = Thread::current();
  if (info.to_interpreter() || info.to_aot()) {
    // Call to interpreter
    if (info.is_optimized() && is_optimized()) {
      assert(is_clean(), "unsafe IC path");
      MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
      // the call analysis (callee structure) specifies that the call is optimized
      // (either because of CHA or the static target is final)
      // At code generation time, this call has been emitted as static call
      // Call via stub
      assert(info.cached_metadata() != NULL && info.cached_metadata()->is_method(), "sanity check");
      methodHandle method(thread, (Method*)info.cached_metadata());
      _call->set_to_interpreted(method, info);

      if (TraceICs) {
        ResourceMark rm(thread);
        tty->print_cr("IC@" INTPTR_FORMAT ": monomorphic to %s: %s",
                      p2i(instruction_address()),
                      (info.to_aot() ? "aot" : "interpreter"),
                      method->print_value_string());
      }
    } else {
      // Call via method-klass-holder
      InlineCacheBuffer::create_transition_stub(this, info.claim_cached_icholder(), info.entry());
      if (TraceICs) {
        ResourceMark rm(thread);
        tty->print_cr("IC@" INTPTR_FORMAT ": monomorphic to interpreter via icholder ", p2i(instruction_address()));
      }
    }
  } else {
    // Call to compiled code
    bool static_bound = info.is_optimized() || (info.cached_metadata() == NULL);
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob_unsafe(info.entry());
    assert(cb != NULL && cb->is_compiled(), "must be compiled!");
#endif /* ASSERT */

    // This is MT safe if we come from a clean-cache and go through a
    // non-verified entry point
    bool safe = SafepointSynchronize::is_at_safepoint() ||
                (!is_in_transition_state() && (info.is_optimized() || static_bound || is_clean()));

    if (!safe) {
      InlineCacheBuffer::create_transition_stub(this, info.cached_metadata(), info.entry());
    } else {
      if (is_optimized()) {
        set_ic_destination(info.entry());
      } else {
        set_ic_destination_and_value(info.entry(), info.cached_metadata());
      }
    }

    if (TraceICs) {
      ResourceMark rm(thread);
      assert(info.cached_metadata() == NULL || info.cached_metadata()->is_klass(), "must be");
      tty->print_cr("IC@" INTPTR_FORMAT ": monomorphic to compiled (rcvr klass) %s: %s",
                    p2i(instruction_address()),
                    ((Klass*)info.cached_metadata())->print_value_string(),
                    (safe) ? "" : "via stub");
    }
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed, so
  // cleaning it immediately is harmless.
  // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}


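// Computes where a monomorphic call should be directed and records the result
// in the given CompiledICInfo; set_to_monomorphic() above then applies it.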
// is_optimized: Compiler has generated an optimized call (i.e. fixed, no inline cache)
// static_bound: The call can be static bound. If it isn't also optimized, the property
// wasn't provable at time of compilation. An optimized call will have any necessary
// null check, while a static_bound won't. A static_bound (but not optimized) must
// therefore use the unverified entry point.
void CompiledIC::compute_monomorphic_entry(const methodHandle& method,
                                           Klass* receiver_klass,
                                           bool is_optimized,
                                           bool static_bound,
                                           bool caller_is_nmethod,
                                           CompiledICInfo& info,
                                           TRAPS) {
  CompiledMethod* method_code = method->code();

  address entry = NULL;
  if (method_code != NULL && method_code->is_in_use()) {
    assert(method_code->is_compiled(), "must be compiled");
    // Call to compiled code
    //
    // Note: the following problem exists with Compiler1:
    //   - at compile time we may or may not know if the destination is final
    //   - if we know that the destination is final (is_optimized), we will emit
    //     an optimized virtual call (no inline cache), and need a Method* to make
    //     a call to the interpreter
    //   - if we don't know if the destination is final, we emit a standard
    //     virtual call, and use CompiledICHolder to call interpreted code
    //     (no static call stub has been generated)
    //   - In the case that we here notice the call is static bound, we
    //     convert the call into what looks to be an optimized virtual call,
    //     but we must use the unverified entry point (since there will be no
    //     null check on a call when the target isn't loaded).
    //     This causes problems when verifying the IC because
    //     it looks vanilla but is optimized. Code in is_call_to_interpreted
    //     is aware of this and weakens its asserts.
    if (is_optimized) {
      entry = method_code->verified_entry_point();
    } else {
      entry = method_code->entry_point();
    }
  }
  bool far_c2a = entry != NULL && caller_is_nmethod && method_code->is_far_code();
  if (entry != NULL && !far_c2a) {
    // Call to near compiled code (nmethod or aot).
    info.set_compiled_entry(entry, is_optimized ? NULL : receiver_klass, is_optimized);
  } else {
    if (is_optimized) {
      if (far_c2a) {
        // Call to aot code from nmethod.
        info.set_aot_entry(entry, method());
      } else {
        // Use stub entry
        info.set_interpreter_entry(method()->get_c2i_entry(), method());
      }
    } else {
      // Use icholder entry
      assert(method_code == NULL || method_code->is_compiled(), "must be compiled");
      CompiledICHolder* holder = new CompiledICHolder(method(), receiver_klass);
      info.set_icholder_entry(method()->get_c2i_unverified_entry(), holder);
    }
  }
  assert(info.is_optimized() == is_optimized, "must agree");
}


bool CompiledIC::is_icholder_entry(address entry) {
  CodeBlob* cb = CodeCache::find_blob_unsafe(entry);
  if (cb != NULL && cb->is_adapter_blob()) {
    return true;
  }
  // itable stubs also use CompiledICHolder
  if (cb != NULL && cb->is_vtable_blob()) {
    VtableStub* s = VtableStubs::entry_point(entry);
    return (s != NULL) && s->is_itable_stub();
  }

  return false;
}

bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site, const CompiledMethod* cm) {
  // This call site might have become stale, so inspect it carefully.
  address dest = cm->call_wrapper_at(call_site->addr())->destination();
  return is_icholder_entry(dest);
}

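// Note: CompiledICHolders are never freed at the call site itself; they are
// queued with the InlineCacheBuffer and released later, since a holder is no
// longer identifiable once the entry point has been overwritten (see the
// comment in internal_set_ic_destination above).
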
// Release the CompiledICHolder* associated with this call site if there is one.
void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site, const CompiledMethod* cm) {
  assert(cm->is_nmethod(), "must be nmethod");
  // This call site might have become stale, so inspect it carefully.
  NativeCall* call = nativeCall_at(call_site->addr());
  if (is_icholder_entry(call->destination())) {
    NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value());
    InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data());
  }
}

// ----------------------------------------------------------------------------

void CompiledStaticCall::set_to_clean(bool in_use) {
  // in_use is unused but needed to match template function in CompiledMethod
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  // Reset call site
  MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
  CodeBlob* cb = CodeCache::find_blob_unsafe(instruction_address());
  assert(cb != NULL && cb->is_compiled(), "must be compiled");
#endif

  set_destination_mt_safe(resolve_call_stub());

  // Do not reset stub here: It is too expensive to call find_stub.
  // Instead, rely on caller (nmethod::clear_inline_caches) to clear
  // both the call and its stub.
}

bool CompiledStaticCall::is_clean() const {
  return destination() == resolve_call_stub();
}

bool CompiledStaticCall::is_call_to_compiled() const {
  return CodeCache::contains(destination());
}

bool CompiledDirectStaticCall::is_call_to_interpreted() const {
  // It is a call to interpreted code if it calls to a stub. Hence, the destination
  // must be in the stub part of the nmethod that contains the call.
  CompiledMethod* cm = CodeCache::find_compiled(instruction_address());
  return cm->stub_contains(destination());
}

bool CompiledDirectStaticCall::is_call_to_far() const {
  // It is a call to an aot method if it calls to a stub. Hence, the destination
  // must be in the stub part of the nmethod that contains the call.
  CodeBlob* desc = CodeCache::find_blob(instruction_address());
  return desc->as_compiled_method()->stub_contains(destination());
}

void CompiledStaticCall::set_to_compiled(address entry) {
  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr("%s@" INTPTR_FORMAT ": set_to_compiled " INTPTR_FORMAT,
                  name(),
                  p2i(instruction_address()),
                  p2i(entry));
  }
  // Call to compiled code
  assert(CodeCache::contains(entry), "wrong entry point");
  set_destination_mt_safe(entry);
}

void CompiledStaticCall::set(const StaticCallInfo& info) {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if a cache entry becomes invalid, we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache.
  assert(is_clean(), "do not update a call entry - use clean");

  if (info._to_interpreter) {
    // Call to interpreted code
    set_to_interpreted(info.callee(), info.entry());
#if INCLUDE_AOT
  } else if (info._to_aot) {
    // Call to far code
    set_to_far(info.callee(), info.entry());
#endif
  } else {
    set_to_compiled(info.entry());
  }
}

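// As with compute_monomorphic_entry above, resolution is split in two:
// compute_entry() fills in a StaticCallInfo, which set() then applies.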
// Compute settings for a CompiledStaticCall. Since we might have to set
// the stub when calling to the interpreter, we need to return arguments.
void CompiledStaticCall::compute_entry(const methodHandle& m, bool caller_is_nmethod, StaticCallInfo& info) {
  CompiledMethod* m_code = m->code();
  info._callee = m;
  if (m_code != NULL && m_code->is_in_use()) {
    if (caller_is_nmethod && m_code->is_far_code()) {
      // Call to far aot code from nmethod.
      info._to_aot = true;
    } else {
      info._to_aot = false;
    }
    info._to_interpreter = false;
    info._entry = m_code->verified_entry_point();
  } else {
    // Callee is interpreted code. In any case entering the interpreter
    // puts a converter-frame on the stack to save arguments.
    assert(!m->is_method_handle_intrinsic(), "Compiled code should never call interpreter MH intrinsics");
    info._to_interpreter = true;
    info._entry = m()->get_c2i_entry();
  }
}

address CompiledDirectStaticCall::find_stub_for(address instruction, bool is_aot) {
  // Find reloc. information containing this call-site
  RelocIterator iter((nmethod*)NULL, instruction);
  while (iter.next()) {
    if (iter.addr() == instruction) {
      switch(iter.type()) {
        case relocInfo::static_call_type:
          return iter.static_call_reloc()->static_stub(is_aot);
        // We check here for opt_virtual_call_type, since we reuse the code
        // from the CompiledIC implementation
        case relocInfo::opt_virtual_call_type:
          return iter.opt_virtual_call_reloc()->static_stub(is_aot);
        case relocInfo::poll_type:
        case relocInfo::poll_return_type: // A safepoint can't overlap a call.
        default:
          ShouldNotReachHere();
      }
    }
  }
  return NULL;
}

address CompiledDirectStaticCall::find_stub(bool is_aot) {
  return CompiledDirectStaticCall::find_stub_for(instruction_address(), is_aot);
}

address CompiledDirectStaticCall::resolve_call_stub() const {
  return SharedRuntime::get_resolve_static_call_stub();
}

//-----------------------------------------------------------------------------
// Non-product mode code
#ifndef PRODUCT

void CompiledIC::verify() {
  _call->verify();
  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted()
         || is_optimized() || is_megamorphic(), "sanity check");
}

void CompiledIC::print() {
  print_compiled_ic();
  tty->cr();
}

void CompiledIC::print_compiled_ic() {
  tty->print("Inline cache at " INTPTR_FORMAT ", calling %s " INTPTR_FORMAT " cached_value " INTPTR_FORMAT,
             p2i(instruction_address()), is_call_to_interpreted() ? "interpreted " : "", p2i(ic_destination()), p2i(is_optimized() ? NULL : cached_value()));
}

void CompiledDirectStaticCall::print() {
  tty->print("static call at " INTPTR_FORMAT " -> ", p2i(instruction_address()));
  if (is_clean()) {
    tty->print("clean");
  } else if (is_call_to_compiled()) {
    tty->print("compiled");
  } else if (is_call_to_far()) {
    tty->print("far");
  } else if (is_call_to_interpreted()) {
    tty->print("interpreted");
  }
  tty->cr();
}

#endif // !PRODUCT