/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/oopFactory.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "runtime/icache.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/events.hpp"


// Every time a compiled IC is changed or its type is being accessed,
// either the CompiledIC_lock must be set or we must be at a safe point.

//-----------------------------------------------------------------------------
// Low-level access to an inline cache. Private, since they might not be
// MT-safe to use.

void* CompiledIC::cached_value() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert (!is_optimized(), "an optimized virtual call does not have a cached metadata");

  if (!is_in_transition_state()) {
    void* data = (void*)_value->data();
    // If we let the metadata value here be initialized to zero...
    assert(data != NULL || Universe::non_oop_word() == NULL,
           "no raw nulls in CompiledIC metadatas, because of patching races");
    return (data == (void*)Universe::non_oop_word()) ? NULL : data;
  } else {
    return InlineCacheBuffer::cached_value_for((CompiledIC *)this);
  }
}


void CompiledIC::internal_set_ic_destination(address entry_point, bool is_icstub, void* cache, bool is_icholder) {
  assert(entry_point != NULL, "must set legal entry point");
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert (!is_optimized() || cache == NULL, "an optimized virtual call does not have a cached metadata");
  assert (cache == NULL || cache != (Metadata*)badOopVal, "invalid metadata");

  assert(!is_icholder || is_icholder_entry(entry_point), "must be");

  // Don't use ic_destination for this test since that forwards
  // through ICBuffer instead of returning the actual current state of
  // the CompiledIC.
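  // A minimal sketch of the distinction this test relies on (illustrative,
  // hedged; names below are the members of this class):
  //
  //   address raw  = _ic_call->destination();   // what is patched in right now
  //   address next = ic_destination();          // forwards through the ICBuffer
  //
  // While in transition state the two differ; for the icholder bookkeeping
  // below only the current raw state matters.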
  if (is_icholder_entry(_ic_call->destination())) {
    // When patching for the ICStub case the cached value isn't
    // overwritten until the ICStub copied into the CompiledIC during
    // the next safepoint. Make sure that the CompiledICHolder* is
    // marked for release at this point since it won't be identifiable
    // once the entry point is overwritten.
    InlineCacheBuffer::queue_for_release((CompiledICHolder*)_value->data());
  }

  if (TraceCompiledIC) {
    tty->print(" ");
    print_compiled_ic();
    tty->print(" changing destination to " INTPTR_FORMAT, p2i(entry_point));
    if (!is_optimized()) {
      tty->print(" changing cached %s to " INTPTR_FORMAT, is_icholder ? "icholder" : "metadata", p2i((address)cache));
    }
    if (is_icstub) {
      tty->print(" (icstub)");
    }
    tty->cr();
  }

  {
    MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call);
    assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
#endif
    _ic_call->set_destination_mt_safe(entry_point);
  }

  if (is_optimized() || is_icstub) {
    // Optimized call sites don't have a cache value and ICStub call
    // sites only change the entry point. Changing the value in that
    // case could lead to MT safety issues.
    assert(cache == NULL, "must be null");
    return;
  }

  if (cache == NULL) cache = (void*)Universe::non_oop_word();

  _value->set_data((intptr_t)cache);
}


void CompiledIC::set_ic_destination(ICStub* stub) {
  internal_set_ic_destination(stub->code_begin(), true, NULL, false);
}


address CompiledIC::ic_destination() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  if (!is_in_transition_state()) {
    return _ic_call->destination();
  } else {
    return InlineCacheBuffer::ic_destination_for((CompiledIC *)this);
  }
}


bool CompiledIC::is_in_transition_state() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  return InlineCacheBuffer::contains(_ic_call->destination());
}


bool CompiledIC::is_icholder_call() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  return !_is_optimized && is_icholder_entry(ic_destination());
}

// Returns native address of 'call' instruction in inline-cache. Used by
// the InlineCacheBuffer when it needs to find the stub.
address CompiledIC::stub_address() const {
  assert(is_in_transition_state(), "should only be called when we are in a transition state");
  return _ic_call->destination();
}

// Clears the IC stub if the compiled IC is in transition state
void CompiledIC::clear_ic_stub() {
  if (is_in_transition_state()) {
    ICStub* stub = ICStub_from_destination_address(stub_address());
    stub->clear();
  }
}


//-----------------------------------------------------------------------------
// High-level access to an inline cache. Guaranteed to be MT-safe.
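
// A hedged sketch of the typical caller pattern for this MT-safe interface
// (assuming an iteration similar to the one in nmethod::clear_inline_caches;
// the loop is illustrative, not a verbatim copy of that code):
//
//   MutexLocker cl(CompiledIC_lock);
//   RelocIterator iter(nm);
//   while (iter.next()) {
//     if (iter.type() == relocInfo::virtual_call_type ||
//         iter.type() == relocInfo::opt_virtual_call_type) {
//       CompiledIC* ic = CompiledIC_at(&iter);
//       if (!ic->is_clean()) {
//         ic->set_to_clean(nm->is_alive());  // transitions via an ICStub when unsafe
//       }
//     }
//   }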

void CompiledIC::initialize_from_iter(RelocIterator* iter) {
  assert(iter->addr() == _ic_call->instruction_address(), "must find ic_call");

  if (iter->type() == relocInfo::virtual_call_type) {
    virtual_call_Relocation* r = iter->virtual_call_reloc();
    _is_optimized = false;
    _value = nativeMovConstReg_at(r->cached_value());
  } else {
    assert(iter->type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
    _is_optimized = true;
    _value = NULL;
  }
}

CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
  : _ic_call(call)
{
  address ic_call = _ic_call->instruction_address();

  assert(ic_call != NULL, "ic_call address must be set");
  assert(nm != NULL, "must pass nmethod");
  assert(nm->contains(ic_call), "must be in nmethod");

  // Search for the ic_call at the given address.
  RelocIterator iter(nm, ic_call, ic_call+1);
  bool ret = iter.next();
  assert(ret == true, "relocInfo must exist at this address");
  assert(iter.addr() == ic_call, "must find ic_call");

  initialize_from_iter(&iter);
}

CompiledIC::CompiledIC(RelocIterator* iter)
  : _ic_call(nativeCall_at(iter->addr()))
{
  address ic_call = _ic_call->instruction_address();

  nmethod* nm = iter->code();
  assert(ic_call != NULL, "ic_call address must be set");
  assert(nm != NULL, "must pass nmethod");
  assert(nm->contains(ic_call), "must be in nmethod");

  initialize_from_iter(iter);
}

bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
  assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");

  address entry;
  if (call_info->call_kind() == CallInfo::itable_call) {
    assert(bytecode == Bytecodes::_invokeinterface, "");
    int itable_index = call_info->itable_index();
    entry = VtableStubs::find_itable_stub(itable_index);
    if (entry == NULL) {
      return false;
    }
#ifdef ASSERT
    int index = call_info->resolved_method()->itable_index();
    assert(index == itable_index, "CallInfo pre-computes this");
#endif //ASSERT
    InstanceKlass* k = call_info->resolved_method()->method_holder();
    assert(k->verify_itable_index(itable_index), "sanity check");
    InlineCacheBuffer::create_transition_stub(this, k, entry);
  } else {
    assert(call_info->call_kind() == CallInfo::vtable_call, "either itable or vtable");
    // Can be different than selected_method->vtable_index(), due to package-private etc.
    int vtable_index = call_info->vtable_index();
    assert(call_info->resolved_klass()->verify_vtable_index(vtable_index), "sanity check");
    entry = VtableStubs::find_vtable_stub(vtable_index);
    if (entry == NULL) {
      return false;
    }
    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
  }

  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr ("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
                   p2i(instruction_address()), call_info->selected_method()->print_value_string(), p2i(entry));
  }

  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_megamorphic(), "sanity check");
  return true;
}


// true if destination is megamorphic stub
bool CompiledIC::is_megamorphic() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "an optimized call cannot be megamorphic");

  // Cannot rely on cached_value. It is either an interface or a method.
  return VtableStubs::is_entry_point(ic_destination());
}

bool CompiledIC::is_call_to_compiled() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");

  // Use unsafe, since an inline cache might point to a zombie method. However, the zombie
  // method is guaranteed to still exist, since we only remove methods after all inline caches
  // have been cleaned up
  CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
  bool is_monomorphic = (cb != NULL && cb->is_nmethod());
  // Check that the cached_value is a klass for non-optimized monomorphic calls
  // This assertion is invalid for compiler1: a call that does not look optimized (no static stub) can be used
  // for calling directly to vep without using the inline cache (i.e., cached_value == NULL)
#ifdef ASSERT
  CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address());
  bool is_c1_method = caller->is_compiled_by_c1();
  assert( is_c1_method ||
         !is_monomorphic ||
         is_optimized() ||
         !caller->is_alive() ||
         (cached_metadata() != NULL && cached_metadata()->is_klass()), "sanity check");
#endif // ASSERT
  return is_monomorphic;
}


bool CompiledIC::is_call_to_interpreted() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // Call to interpreter if destination is either calling to a stub (if it
  // is optimized), or calling to an I2C blob
  bool is_call_to_interpreted = false;
  if (!is_optimized()) {
    // must use unsafe because the destination can be a zombie (and we're cleaning)
    // and the print_compiled_ic code wants to know if the site (in the non-zombie)
    // is to the interpreter.
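    // Descriptive note on the two shapes this non-optimized branch distinguishes
    // (based on is_icholder_call() above and the adapter-blob test below):
    //   - interpreted target: destination is a c2i adapter blob and the cached
    //     value is a CompiledICHolder*.
    //   - compiled target:    destination is an nmethod entry and the cached
    //     value is the receiver Klass* (see is_call_to_compiled()).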
    CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
    is_call_to_interpreted = (cb != NULL && cb->is_adapter_blob());
    assert(!is_call_to_interpreted || (is_icholder_call() && cached_icholder() != NULL), "sanity check");
  } else {
    // Check if we are calling into our own codeblob (i.e., to a stub)
    CodeBlob* cb = CodeCache::find_blob(_ic_call->instruction_address());
    address dest = ic_destination();
#ifdef ASSERT
    {
      CodeBlob* db = CodeCache::find_blob_unsafe(dest);
      assert(!db->is_adapter_blob(), "must use stub!");
    }
#endif /* ASSERT */
    is_call_to_interpreted = cb->contains(dest);
  }
  return is_call_to_interpreted;
}


void CompiledIC::set_to_clean(bool in_use) {
  assert(SafepointSynchronize::is_at_safepoint() || CompiledIC_lock->is_locked(), "MT-unsafe call");
  if (TraceInlineCacheClearing || TraceICs) {
    tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", p2i(instruction_address()));
    print();
  }

  address entry;
  if (is_optimized()) {
    entry = SharedRuntime::get_resolve_opt_virtual_call_stub();
  } else {
    entry = SharedRuntime::get_resolve_virtual_call_stub();
  }

  // A zombie transition will always be safe, since the metadata has already been set to NULL, so
  // we only need to patch the destination
  bool safe_transition = !in_use || is_optimized() || SafepointSynchronize::is_at_safepoint();

  if (safe_transition) {
    // Kill any leftover stub we might have too
    clear_ic_stub();
    if (is_optimized()) {
      set_ic_destination(entry);
    } else {
      set_ic_destination_and_value(entry, (void*)NULL);
    }
  } else {
    // Unsafe transition - create stub.
    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_clean(), "sanity check");
}


bool CompiledIC::is_clean() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  bool is_clean = false;
  address dest = ic_destination();
  is_clean = dest == SharedRuntime::get_resolve_opt_virtual_call_stub() ||
             dest == SharedRuntime::get_resolve_virtual_call_stub();
  assert(!is_clean || is_optimized() || cached_value() == NULL, "sanity check");
  return is_clean;
}


void CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if cache entry gets invalid - we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache
  //
  // The above is no longer true. SharedRuntime::fixup_callers_callsite will change optimized
  // callsites. In addition ic_miss code will update a site to monomorphic if it determines
  // that a monomorphic call to the interpreter can now be monomorphic to compiled code.
  //
  // In both of these cases the only thing being modified is the jump/call target and these
  // transitions are mt_safe

  Thread *thread = Thread::current();
  if (info.to_interpreter()) {
    // Call to interpreter
    if (info.is_optimized() && is_optimized()) {
      assert(is_clean(), "unsafe IC path");
      MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
      // the call analysis (callee structure) specifies that the call is optimized
      // (either because of CHA or the static target is final)
      // At code generation time, this call has been emitted as static call
      // Call via stub
      assert(info.cached_metadata() != NULL && info.cached_metadata()->is_method(), "sanity check");
      CompiledStaticCall* csc = compiledStaticCall_at(instruction_address());
      methodHandle method (thread, (Method*)info.cached_metadata());
      csc->set_to_interpreted(method, info.entry());
      if (TraceICs) {
        ResourceMark rm(thread);
        tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter: %s",
                       p2i(instruction_address()),
                       method->print_value_string());
      }
    } else {
      // Call via method-klass-holder
      InlineCacheBuffer::create_transition_stub(this, info.claim_cached_icholder(), info.entry());
      if (TraceICs) {
        ResourceMark rm(thread);
        tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter via icholder ", p2i(instruction_address()));
      }
    }
  } else {
    // Call to compiled code
    bool static_bound = info.is_optimized() || (info.cached_metadata() == NULL);
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob_unsafe(info.entry());
    assert (cb->is_nmethod(), "must be compiled!");
#endif /* ASSERT */

    // This is MT safe if we come from a clean-cache and go through a
    // non-verified entry point
    bool safe = SafepointSynchronize::is_at_safepoint() ||
                (!is_in_transition_state() && (info.is_optimized() || static_bound || is_clean()));

    if (!safe) {
      InlineCacheBuffer::create_transition_stub(this, info.cached_metadata(), info.entry());
    } else {
      if (is_optimized()) {
        set_ic_destination(info.entry());
      } else {
        set_ic_destination_and_value(info.entry(), info.cached_metadata());
      }
    }

    if (TraceICs) {
      ResourceMark rm(thread);
      assert(info.cached_metadata() == NULL || info.cached_metadata()->is_klass(), "must be");
      tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to compiled (rcvr klass) %s: %s",
                     p2i(instruction_address()),
                     ((Klass*)info.cached_metadata())->print_value_string(),
                     (safe) ? "" : "via stub");
    }
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}


// is_optimized: Compiler has generated an optimized call (i.e., no inline
// cache). static_bound: The call can be static bound (i.e., no need to use
// inline cache).
void CompiledIC::compute_monomorphic_entry(methodHandle method,
                                           KlassHandle receiver_klass,
                                           bool is_optimized,
                                           bool static_bound,
                                           CompiledICInfo& info,
                                           TRAPS) {
  nmethod* method_code = method->code();
  address entry = NULL;
  if (method_code != NULL && method_code->is_in_use()) {
    // Call to compiled code
    if (static_bound || is_optimized) {
      entry = method_code->verified_entry_point();
    } else {
      entry = method_code->entry_point();
    }
  }
  if (entry != NULL) {
    // Call to compiled code
    info.set_compiled_entry(entry, (static_bound || is_optimized) ? NULL : receiver_klass(), is_optimized);
  } else {
    // Note: the following problem exists with Compiler1:
    //   - at compile time we may or may not know if the destination is final
    //   - if we know that the destination is final, we will emit an optimized
    //     virtual call (no inline cache), and need a Method* to make a call
    //     to the interpreter
    //   - if we do not know if the destination is final, we emit a standard
    //     virtual call, and use CompiledICHolder to call interpreted code
    //     (no static call stub has been generated)
    //     However in that case we will now notice it is static_bound
    //     and convert the call into what looks to be an optimized
    //     virtual call. This causes problems in verifying the IC because
    //     it looks vanilla but is optimized. Code in is_call_to_interpreted
    //     is aware of this and weakens its asserts.

    // static_bound should imply is_optimized -- otherwise we have a
    // performance bug (statically-bindable method is called via
    // dynamically-dispatched call). Note: the reverse implication isn't
    // necessarily true -- the call may have been optimized based on compiler
    // analysis (static_bound is only based on "final" etc.)
#ifdef COMPILER2
#ifdef TIERED
#if defined(ASSERT)
    // can't check the assert because we don't have the CompiledIC with which to
    // find the address of the call instruction.
    //
    // CodeBlob* cb = find_blob_unsafe(instruction_address());
    // assert(cb->is_compiled_by_c1() || !static_bound || is_optimized, "static_bound should imply is_optimized");
#endif // ASSERT
#else
    assert(!static_bound || is_optimized, "static_bound should imply is_optimized");
#endif // TIERED
#endif // COMPILER2
    if (is_optimized) {
      // Use stub entry
      info.set_interpreter_entry(method()->get_c2i_entry(), method());
    } else {
      // Use icholder entry
      CompiledICHolder* holder = new CompiledICHolder(method(), receiver_klass());
      info.set_icholder_entry(method()->get_c2i_unverified_entry(), holder);
    }
  }
  assert(info.is_optimized() == is_optimized, "must agree");
}


bool CompiledIC::is_icholder_entry(address entry) {
  CodeBlob* cb = CodeCache::find_blob_unsafe(entry);
  return (cb != NULL && cb->is_adapter_blob());
}

// ----------------------------------------------------------------------------

void CompiledStaticCall::set_to_clean() {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  // Reset call site
  MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
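  // Descriptive note: at a safepoint no other thread can be patching, so the
  // Patching_lock is skipped (a NULL mutex with _no_safepoint_check_flag is a
  // no-op); otherwise the lock serializes this patch with other code patching.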
#ifdef ASSERT
  CodeBlob* cb = CodeCache::find_blob_unsafe(this);
  assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
#endif
  set_destination_mt_safe(SharedRuntime::get_resolve_static_call_stub());

  // Do not reset stub here: It is too expensive to call find_stub.
  // Instead, rely on caller (nmethod::clear_inline_caches) to clear
  // both the call and its stub.
}


bool CompiledStaticCall::is_clean() const {
  return destination() == SharedRuntime::get_resolve_static_call_stub();
}

bool CompiledStaticCall::is_call_to_compiled() const {
  return CodeCache::contains(destination());
}


bool CompiledStaticCall::is_call_to_interpreted() const {
  // It is a call to interpreted code if it calls to a stub. Hence, the destination
  // must be in the stub part of the nmethod that contains the call
  nmethod* nm = CodeCache::find_nmethod(instruction_address());
  return nm->stub_contains(destination());
}

void CompiledStaticCall::set(const StaticCallInfo& info) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if cache entry gets invalid - we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache
  assert(is_clean(), "do not update a call entry - use clean");

  if (info._to_interpreter) {
    // Call to interpreted code
    set_to_interpreted(info.callee(), info.entry());
  } else {
    if (TraceICs) {
      ResourceMark rm;
      tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_compiled " INTPTR_FORMAT,
                    p2i(instruction_address()),
                    p2i(info.entry()));
    }
    // Call to compiled code
    assert (CodeCache::contains(info.entry()), "wrong entry point");
    set_destination_mt_safe(info.entry());
  }
}


// Compute settings for a CompiledStaticCall. Since we might have to set
// the stub when calling to the interpreter, we need to return arguments.
void CompiledStaticCall::compute_entry(methodHandle m, StaticCallInfo& info) {
  nmethod* m_code = m->code();
  info._callee = m;
  if (m_code != NULL && m_code->is_in_use()) {
    info._to_interpreter = false;
    info._entry = m_code->verified_entry_point();
  } else {
    // Callee is interpreted code. In any case entering the interpreter
    // puts a converter-frame on the stack to save arguments.
    assert(!m->is_method_handle_intrinsic(), "Compiled code should never call interpreter MH intrinsics");
    info._to_interpreter = true;
    info._entry = m()->get_c2i_entry();
  }
}

address CompiledStaticCall::find_stub() {
  // Find reloc. information containing this call-site
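  // Descriptive note (assumption about the RelocIterator constructor): passing
  // a NULL nmethod lets the iterator deduce the enclosing blob from the address;
  // the walk below then stops at the relocation that annotates exactly this
  // call instruction.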
  RelocIterator iter((nmethod*)NULL, instruction_address());
  while (iter.next()) {
    if (iter.addr() == instruction_address()) {
      switch(iter.type()) {
        case relocInfo::static_call_type:
          return iter.static_call_reloc()->static_stub();
        // We check here for opt_virtual_call_type, since we reuse the code
        // from the CompiledIC implementation
        case relocInfo::opt_virtual_call_type:
          return iter.opt_virtual_call_reloc()->static_stub();
        case relocInfo::poll_type:
        case relocInfo::poll_return_type: // A safepoint can't overlap a call.
        default:
          ShouldNotReachHere();
      }
    }
  }
  return NULL;
}


//-----------------------------------------------------------------------------
// Non-product mode code
#ifndef PRODUCT

void CompiledIC::verify() {
  // make sure code pattern is actually a call imm32 instruction
  _ic_call->verify();
  if (os::is_MP()) {
    _ic_call->verify_alignment();
  }
  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted()
         || is_optimized() || is_megamorphic(), "sanity check");
}

void CompiledIC::print() {
  print_compiled_ic();
  tty->cr();
}

void CompiledIC::print_compiled_ic() {
  tty->print("Inline cache at " INTPTR_FORMAT ", calling %s " INTPTR_FORMAT " cached_value " INTPTR_FORMAT,
             p2i(instruction_address()), is_call_to_interpreted() ? "interpreted " : "", p2i(ic_destination()), p2i(is_optimized() ? NULL : cached_value()));
}

void CompiledStaticCall::print() {
  tty->print("static call at " INTPTR_FORMAT " -> ", p2i(instruction_address()));
  if (is_clean()) {
    tty->print("clean");
  } else if (is_call_to_compiled()) {
    tty->print("compiled");
  } else if (is_call_to_interpreted()) {
    tty->print("interpreted");
  }
  tty->cr();
}

#endif // !PRODUCT