/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/oopFactory.hpp"
#include "oops/methodOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "runtime/icache.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/events.hpp"


// Every time a compiled IC is changed or its type is accessed, either the
// CompiledIC_lock must be held or we must be at a safepoint.

//-----------------------------------------------------------------------------
// Low-level access to an inline cache. Private, since they might not be
// MT-safe to use.

void CompiledIC::set_cached_oop(oop cache) {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "an optimized virtual call does not have a cached oop");
  assert(cache == NULL || cache != badOop, "invalid oop");

  if (TraceCompiledIC) {
    tty->print(" ");
    print_compiled_ic();
    tty->print_cr(" changing oop to " INTPTR_FORMAT, (address)cache);
  }

  // A NULL cached oop is represented by the non-oop sentinel word.
  if (cache == NULL) cache = (oop)Universe::non_oop_word();

  *_oop_addr = cache;
  // Fix up the relocations.
  RelocIterator iter = _oops;
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* r = iter.oop_reloc();
      if (r->oop_addr() == _oop_addr)
        r->fix_oop_relocation();
    }
  }
}
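
// Note on the sentinel above: a cleared IC stores Universe::non_oop_word()
// rather than a raw NULL in its oop slot, so code racing with patching never
// observes a half-written NULL oop (see the assert in cached_oop() below).
// cached_oop() folds the sentinel back to NULL for callers.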

oop CompiledIC::cached_oop() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "an optimized virtual call does not have a cached oop");

  if (!is_in_transition_state()) {
    oop data = *_oop_addr;
    // If we let the oop value here be initialized to zero...
    assert(data != NULL || Universe::non_oop_word() == NULL,
           "no raw nulls in CompiledIC oops, because of patching races");
    return (data == (oop)Universe::non_oop_word()) ? (oop)NULL : data;
  } else {
    return InlineCacheBuffer::cached_oop_for((CompiledIC *)this);
  }
}


void CompiledIC::set_ic_destination(address entry_point, bool set_profiled) {
  assert(entry_point != NULL, "must set legal entry point");
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  if (TraceCompiledIC) {
    tty->print(" ");
    print_compiled_ic();
    tty->print_cr(" changing destination to " INTPTR_FORMAT, entry_point);
  }
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
  CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call);
  assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
#endif
  if (set_profiled) {
    if (is_profiled()) {
      // Already profiled: retarget the jump at the end of the existing
      // profile call stub.
      assert(SafepointSynchronize::is_at_safepoint(), "unsafe if not at safepoint");
      int off = *(int*)(_ic_call->destination() - 4); // first word of profile call stub is offset
      NativeJump* jump = nativeJump_at(_ic_call->destination() + off - 4);
      jump->set_jump_destination(entry_point);
    } else {
      set_up_profiling(entry_point);
    }
  } else {
    _ic_call->set_destination_mt_safe(entry_point);
  }
}


address CompiledIC::ic_destination() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  if (!is_in_transition_state()) {
    if (is_profiled()) {
      return profile_target();
    } else {
      return _ic_call->destination();
    }
  } else {
    return InlineCacheBuffer::ic_destination_for((CompiledIC *)this);
  }
}


bool CompiledIC::is_in_transition_state() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  if (is_profiled()) {
    return InlineCacheBuffer::contains(profile_target());
  }
  return InlineCacheBuffer::contains(_ic_call->destination());
}


// Returns the native address of the 'call' instruction in the inline cache.
// Used by the InlineCacheBuffer when it needs to find the stub.
address CompiledIC::stub_address() const {
  assert(is_in_transition_state(), "should only be called when we are in a transition state");
  if (is_profiled()) {
    return profile_target();
  }
  return _ic_call->destination();
}
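
// Layout of a C1 profile call stub, as implied by the offset arithmetic in
// set_ic_destination() above and throughout CompiledProfile below (a sketch;
// the actual instruction sequence is platform-specific):
//
//   stub + 0:    int32 offset from the stub base to the trailing jump
//   stub + 4:    profiling code (MDO update) -- the call site targets this
//   stub + off:  NativeJump to the real entry point; retargeting the call
//                means patching this jump's destination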

bool CompiledProfile::is_profiled() const {
  return is_profiled(call_instr());
}

bool CompiledProfile::is_profiled(NativeCall* call) {
#ifdef COMPILER2
  return false;
#endif
#ifdef COMPILER1
  if (!C1ProfileInlining) return false;

  CodeBlob* cb = CodeCache::find_blob(call->instruction_address());
  if (cb == NULL || !cb->is_nmethod() || !((nmethod*)cb)->stub_contains(call->destination())) {
    return false;
  }
  // The call points into this nmethod's stub area, so it branches either to
  // a static call stub or to a profile call stub. Both start with a
  // NativeMovConstReg. If this instruction loads an MDO then the target is
  // a profile call stub.
  oop obj = NULL;
#if defined(SPARC) || defined(PPC) || defined(ARM)
  if (SafepointSynchronize::is_at_safepoint()) {
    // When called at a safepoint the reloc info may have been updated (if
    // the oop that the instruction loads was moved by the GC), but the code
    // itself is not necessarily up to date.
    address addr = call->destination();
    RelocIterator oops((nmethod*)cb, addr, addr + 1);
    while (oops.next()) {
      if (oops.type() == relocInfo::oop_type) {
        oop_Relocation* r = oops.oop_reloc();
        obj = r->oop_value();
        break;
      }
    }
#ifdef ARM
    // The first word of a static call stub has no reloc info, so if we do
    // not find any reloc info we know it is a static call stub.
    return obj != NULL;
#endif
  } else
#endif
  {
    NativeMovConstReg* mov = nativeMovConstReg_at(call->destination());

    intptr_t data = mov->data();
    obj = (oop)data;
  }

  return obj->is_methodData();
#endif
}

address CompiledProfile::profile_target() const {
  return profile_target(call_instr());
}

address CompiledProfile::profile_target(NativeCall* call) {
  assert(is_profiled(call), "only for profiled call sites");
  int off = *(int*)(call->destination() - 4); // first word of profile call stub is offset
  NativeJump* jump = nativeJump_at(call->destination() + off - 4);
  return jump->jump_destination();
}

address CompiledProfile::find_profile_stub() const {
  return find_profile_stub(call_instr());
}

address CompiledProfile::find_profile_stub(NativeCall* call) {
  // The profile stub is right after the static stub: find the static stub,
  // walk past it, and we are at the profile stub.
  CompiledStaticCall* csc = compiledStaticCall_at(call->instruction_address());
  address static_stub = csc->find_stub();

  NativeMovConstReg* method_holder = nativeMovConstReg_at(static_stub);
  NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());

  // On ARM, an extra word is emitted after the branch.
  return jump->next_instruction_address() ARM_ONLY( + 4 );
}

void CompiledProfile::set_up_profiling(address entry_point) {
  address stub = find_profile_stub();
  set_up_profiling(call_instr(), stub, entry_point);
}

void CompiledProfile::set_up_profiling(NativeCall* call, address stub, address entry_point) {
  assert(Patching_lock->is_locked(), "should be protected by Patching_lock");

  // Point the stub's trailing jump at the real entry point, then swing the
  // call to the stub's profiling code.
  address jmp_addr = stub + *(int*)stub;
  NativeJump* jump = nativeJump_at(jmp_addr);
  jump->set_jump_destination(entry_point);
  call->set_destination_mt_safe(stub + 4); // first word of profile call stub is offset
}

bool CompiledProfile::is_call_to_stub(NativeCall* call, address stub) {
  return call->destination() == stub + 4; // first word of profile call stub is offset
}

//-----------------------------------------------------------------------------
// High-level access to an inline cache. Guaranteed to be MT-safe.
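
// Overview of the states an inline cache moves between (a summary of the
// transitions implemented below):
//
//   clean       -> destination is a resolve stub; no cached oop
//   monomorphic -> destination is a single compiled entry point, or an
//                  interpreter entry reached via a static call stub or
//                  compiledICHolder; the cached oop identifies the receiver
//                  klass unless the call is optimized
//   megamorphic -> destination is a vtable/itable stub
//
// Transitions that cannot be patched atomically detour through the
// InlineCacheBuffer (see is_in_transition_state() above).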

void CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
  methodHandle method = call_info->selected_method();
  bool is_invoke_interface = (bytecode == Bytecodes::_invokeinterface && !call_info->has_vtable_index());
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(method->is_oop(), "cannot be NULL and must be oop");
  assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
  assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");

  address entry;
  if (is_invoke_interface) {
    int index = klassItable::compute_itable_index(call_info->resolved_method()());
    entry = VtableStubs::create_stub(false, index, method());
    assert(entry != NULL, "entry not computed");
    klassOop k = call_info->resolved_method()->method_holder();
    assert(Klass::cast(k)->is_interface(), "sanity check");
    InlineCacheBuffer::create_transition_stub(this, k, entry);
  } else {
    // Can differ from method->vtable_index(), due to package-private etc.
    int vtable_index = call_info->vtable_index();
    entry = VtableStubs::create_stub(true, vtable_index, method());
    InlineCacheBuffer::create_transition_stub(this, method(), entry);
  }

  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
                  instruction_address(), method->print_value_string(), entry);
  }

  Events::log("compiledIC " INTPTR_FORMAT " --> megamorphic " INTPTR_FORMAT, this, (address)method());
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if we ran
  // out of space in the inline cache buffer trying to do the set_next and
  // we safepointed to free up space. This is a benign race because the IC
  // entry was complete when we safepointed, so cleaning it immediately is
  // harmless.
  // assert(is_megamorphic(), "sanity check");
}
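
// Note on the two megamorphic paths above: an invokeinterface without a
// vtable index dispatches through an itable stub keyed by the itable index,
// and the interface klass (not the method) becomes the cached oop; all other
// virtual calls dispatch through a vtable stub keyed by the vtable index,
// with the methodOop as the cached oop.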

// True if the destination is a megamorphic stub.
bool CompiledIC::is_megamorphic() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "an optimized call cannot be megamorphic");

  // Cannot rely on cached_oop: it is either an interface or a method.
  return VtableStubs::is_entry_point(ic_destination());
}

bool CompiledIC::is_call_to_compiled() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");

  // Use the unsafe lookup, since an inline cache might point to a zombie
  // method. However, the zombie method is guaranteed to still exist, since
  // we only remove methods after all inline caches have been cleaned up.
  CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
  bool is_monomorphic = (cb != NULL && cb->is_nmethod());
  // Check that the cached_oop is a klass for non-optimized monomorphic
  // calls. This assertion is invalid for Compiler1: a call that does not
  // look optimized (no static stub) can be used for calling directly to the
  // verified entry point without using the inline cache (i.e.,
  // cached_oop == NULL).
#ifdef ASSERT
#ifdef TIERED
  CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address());
  bool is_c1_method = caller->is_compiled_by_c1();
#else
#ifdef COMPILER1
  bool is_c1_method = true;
#else
  bool is_c1_method = false;
#endif // COMPILER1
#endif // TIERED
  assert(is_c1_method ||
         !is_monomorphic ||
         is_optimized() ||
         (cached_oop() != NULL && cached_oop()->is_klass()), "sanity check");
#endif // ASSERT
  return is_monomorphic;
}


bool CompiledIC::is_call_to_interpreted() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // It is a call to the interpreter if the destination is either a stub (if
  // the call is optimized) or an I2C blob.
  bool is_call_to_interpreted = false;
  if (!is_optimized()) {
    // Must use the unsafe lookup because the destination can be a zombie
    // (and we're cleaning), and the print_compiled_ic code wants to know if
    // the site (in the non-zombie) calls to the interpreter.
    CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
    is_call_to_interpreted = (cb != NULL && cb->is_adapter_blob());
    assert(!is_call_to_interpreted || (cached_oop() != NULL && cached_oop()->is_compiledICHolder()), "sanity check");
  } else {
    // Check if we are calling into our own codeblob (i.e., to a stub).
    CodeBlob* cb = CodeCache::find_blob(_ic_call->instruction_address());
    address dest = ic_destination();
#ifdef ASSERT
    {
      CodeBlob* db = CodeCache::find_blob_unsafe(dest);
      assert(!db->is_adapter_blob(), "must use stub!");
    }
#endif /* ASSERT */
    is_call_to_interpreted = cb->contains(dest);
  }
  return is_call_to_interpreted;
}
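
// The clean transition below is performed in place only when it can be made
// to look atomic to racing mutators: either the call is optimized (no cached
// oop, so only the destination word changes) or we are at a safepoint.
// Otherwise the oop and the destination cannot be patched together
// atomically, so the transition detours through the InlineCacheBuffer.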

void CompiledIC::set_to_clean() {
  assert(SafepointSynchronize::is_at_safepoint() || CompiledIC_lock->is_locked(), "MT-unsafe call");
  if (TraceInlineCacheClearing || TraceICs) {
    tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", instruction_address());
    print();
  }

  address entry;
  if (is_optimized()) {
    entry = SharedRuntime::get_resolve_opt_virtual_call_stub();
  } else {
    entry = SharedRuntime::get_resolve_virtual_call_stub();
  }

  // A zombie transition will always be safe, since the oop has already been
  // set to NULL, so we only need to patch the destination.
  bool safe_transition = is_optimized() || SafepointSynchronize::is_at_safepoint();

  if (safe_transition) {
    if (!is_optimized()) set_cached_oop(NULL);
    // Kill any leftover stub we might have, too.
    if (is_in_transition_state()) {
      ICStub* old_stub = ICStub_from_destination_address(stub_address());
      old_stub->clear();
    }
    set_ic_destination(entry, false);
  } else {
    // Unsafe transition - create stub.
    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if we ran
  // out of space in the inline cache buffer trying to do the set_next and
  // we safepointed to free up space. This is a benign race because the IC
  // entry was complete when we safepointed, so cleaning it immediately is
  // harmless.
  // assert(is_clean(), "sanity check");
}


bool CompiledIC::is_clean() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  bool is_clean = false;
  address dest = ic_destination();
  is_clean = dest == SharedRuntime::get_resolve_opt_virtual_call_stub() ||
             dest == SharedRuntime::get_resolve_virtual_call_stub();
  assert(!is_clean || is_optimized() || cached_oop() == NULL, "sanity check");
  return is_clean;
}
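
// set_to_monomorphic() below distinguishes interpreted targets (reached via
// the static call stub for optimized calls, or via a compiledICHolder
// otherwise) from compiled targets, which are patched in place only when the
// transition is MT-safe and are otherwise routed through an
// InlineCacheBuffer transition stub.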

void CompiledIC::set_to_monomorphic(const CompiledICInfo& info) {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if a cache entry becomes invalid, we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache.
  //
  // The above is no longer true. SharedRuntime::fixup_callers_callsite will
  // change optimized callsites. In addition, ic_miss code will update a site
  // to monomorphic if it determines that a monomorphic call to the
  // interpreter can now be monomorphic to compiled code.
  //
  // In both of these cases the only thing being modified is the jump/call
  // target, and these transitions are MT-safe.

  Thread *thread = Thread::current();
  if (info._to_interpreter) {
    // Call to interpreter
    if (info.is_optimized() && is_optimized()) {
      assert(is_clean(), "unsafe IC path");
      MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
      // The call analysis (callee structure) specifies that the call is
      // optimized (either because of CHA or because the static target is
      // final). At code generation time, this call has been emitted as a
      // static call: call via stub.
      assert(info.cached_oop().not_null() && info.cached_oop()->is_method(), "sanity check");
      CompiledStaticCall* csc = compiledStaticCall_at(instruction_address());
      methodHandle method(thread, (methodOop)info.cached_oop()());
      csc->set_to_interpreted(method, info.entry());
      if (TraceICs) {
        ResourceMark rm(thread);
        tty->print_cr("IC@" INTPTR_FORMAT ": monomorphic to interpreter: %s",
                      instruction_address(),
                      method->print_value_string());
      }
    } else {
      // Call via method-klass-holder
      assert(info.cached_oop().not_null(), "must be set");
      InlineCacheBuffer::create_transition_stub(this, info.cached_oop()(), info.entry());

      if (TraceICs) {
        ResourceMark rm(thread);
        tty->print_cr("IC@" INTPTR_FORMAT ": monomorphic to interpreter via mkh", instruction_address());
      }
    }
  } else {
    // Call to compiled code
    bool static_bound = info.is_optimized() || (info.cached_oop().is_null());
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob_unsafe(info.entry());
    assert(cb->is_nmethod(), "must be compiled!");
#endif /* ASSERT */

    // This is MT-safe if we come from a clean cache and go through a
    // non-verified entry point.
    bool safe = SafepointSynchronize::is_at_safepoint() ||
                (!is_in_transition_state() && (info.is_optimized() || static_bound || is_clean()));

    if (!safe) {
      InlineCacheBuffer::create_transition_stub(this, info.cached_oop()(), info.entry(), info.is_profiled());
    } else {
      set_ic_destination(info.entry(), info.is_profiled());
      if (!is_optimized()) set_cached_oop(info.cached_oop()());
    }

    if (TraceICs) {
      ResourceMark rm(thread);
      assert(info.cached_oop() == NULL || info.cached_oop()()->is_klass(), "must be");
      tty->print_cr("IC@" INTPTR_FORMAT ": monomorphic to compiled (rcvr klass) %s: %s",
                    instruction_address(),
                    ((klassOop)info.cached_oop()())->print_value_string(),
                    (safe) ? "" : "via stub");
    }
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if we ran
  // out of space in the inline cache buffer trying to do the set_next and
  // we safepointed to free up space. This is a benign race because the IC
  // entry was complete when we safepointed, so cleaning it immediately is
  // harmless.
  // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}
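
// Summary of the entry points chosen by compute_monomorphic_entry() below:
//
//   target compiled,    optimized/static_bound -> verified entry point,
//                                                 no cached oop
//   target compiled,    plain virtual          -> unverified entry point,
//                                                 receiver klass cached
//   target interpreted, optimized              -> c2i entry, methodOop cached
//   target interpreted, plain virtual          -> unverified c2i entry,
//                                                 compiledICHolder cached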

// is_optimized:  the compiler has generated an optimized call (i.e., no
//                inline cache).
// static_bound:  the call can be statically bound (i.e., no need to use an
//                inline cache).
void CompiledIC::compute_monomorphic_entry(methodHandle method,
                                           KlassHandle receiver_klass,
                                           bool is_optimized,
                                           bool static_bound,
                                           CompiledICInfo& info,
                                           bool is_profiled,
                                           TRAPS) {
  info._is_optimized = is_optimized;
  info._is_profiled = is_profiled;

  nmethod* method_code = method->code();
  address entry = NULL;
  if (method_code != NULL) {
    // Call to compiled code
    if (static_bound || is_optimized) {
      entry = method_code->verified_entry_point();
    } else {
      entry = method_code->entry_point();
    }
  }
  if (entry != NULL) {
    // Call to compiled code
    info._entry = entry;
    if (static_bound || is_optimized) {
      info._cached_oop = Handle(THREAD, (oop)NULL);
    } else {
      info._cached_oop = receiver_klass;
    }
    info._to_interpreter = false;
  } else {
    // Note: the following problem exists with Compiler1:
    // - at compile time we may or may not know if the destination is final
    // - if we know that the destination is final, we will emit an optimized
    //   virtual call (no inline cache), and need a methodOop to make a call
    //   to the interpreter
    // - if we do not know if the destination is final, we emit a standard
    //   virtual call, and use CompiledICHolder to call interpreted code
    //   (no static call stub has been generated)
    // However, in that case we will now notice it is static_bound and
    // convert the call into what looks to be an optimized virtual call.
    // This causes problems in verifying the IC because it looks vanilla but
    // is optimized. Code in is_call_to_interpreted is aware of this and
    // weakens its asserts.

    info._to_interpreter = true;
    // static_bound should imply is_optimized -- otherwise we have a
    // performance bug (a statically-bindable method is called via a
    // dynamically-dispatched call). Note: the reverse implication isn't
    // necessarily true -- the call may have been optimized based on
    // compiler analysis (static_bound is only based on "final" etc.)
#ifdef COMPILER2
#ifdef TIERED
#if defined(ASSERT)
    // Can't check the assert because we don't have the CompiledIC with
    // which to find the address of the call instruction.
    //
    // CodeBlob* cb = find_blob_unsafe(instruction_address());
    // assert(cb->is_compiled_by_c1() || !static_bound || is_optimized, "static_bound should imply is_optimized");
#endif // ASSERT
#else
    assert(!static_bound || is_optimized, "static_bound should imply is_optimized");
#endif // TIERED
#endif // COMPILER2
    if (is_optimized) {
      // Use stub entry
      info._entry = method()->get_c2i_entry();
      info._cached_oop = method;
    } else {
      // Use mkh entry
      oop holder = oopFactory::new_compiledICHolder(method, receiver_klass, CHECK);
      info._cached_oop = Handle(THREAD, holder);
      info._entry = method()->get_c2i_unverified_entry();
    }
  }
}


inline static RelocIterator parse_ic(nmethod* nm, address ic_call, oop* &_oop_addr, bool *is_optimized) {
  address first_oop = NULL;
  // Mergers please note: Sun SC5.x CC insists on an lvalue for a reference parameter.
  nmethod* tmp_nm = nm;
  return virtual_call_Relocation::parse_ic(tmp_nm, ic_call, first_oop, _oop_addr, is_optimized);
}

CompiledIC::CompiledIC(NativeCall* ic_call)
  : _ic_call(ic_call),
    _oops(parse_ic(NULL, ic_call->instruction_address(), _oop_addr, &_is_optimized))
{
}


CompiledIC::CompiledIC(Relocation* ic_reloc)
  : _ic_call(nativeCall_at(ic_reloc->addr())),
    _oops(parse_ic(ic_reloc->code(), ic_reloc->addr(), _oop_addr, &_is_optimized))
{
  assert(ic_reloc->type() == relocInfo::virtual_call_type ||
         ic_reloc->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
}


void CompiledIC::drop_profiling() {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "MT-unsafe call");

  if (is_profiled()) {
    // Bypass the profile stub: point the call directly at the real target.
    set_ic_destination(profile_target(), false);
  }
}

// ----------------------------------------------------------------------------

void CompiledStaticCall::set_to_clean() {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "MT-unsafe call");
  // Reset call site
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
  CodeBlob* cb = CodeCache::find_blob_unsafe(this);
  assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
#endif
  set_destination_mt_safe(SharedRuntime::get_resolve_static_call_stub());

  // Do not reset the stub here: it is too expensive to call find_stub.
  // Instead, rely on the caller (nmethod::clear_inline_caches) to clear
  // both the call and its stub.
}

address CompiledStaticCall::destination() const {
  if (CompiledProfile::is_profiled(call_instr())) {
    return CompiledProfile::profile_target(call_instr());
  }
  return NativeCall::destination();
}

bool CompiledStaticCall::is_clean() const {
  return destination() == SharedRuntime::get_resolve_static_call_stub();
}

bool CompiledStaticCall::is_call_to_compiled() const {
  return CodeCache::contains(destination());
}


bool CompiledStaticCall::is_call_to_interpreted() const {
  // It is a call to the interpreter if it calls to a stub. Hence, the
  // destination must be in the stub part of the nmethod that contains the
  // call.
  nmethod* nm = CodeCache::find_nmethod(instruction_address());
  return nm->stub_contains(destination());
}
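
// Layout of a static call stub, as implied by the patching code below (a
// sketch; instruction encodings are platform-specific):
//
//   NativeMovConstReg -- materializes the callee methodOop for the c2i
//                        adapter (0 while the stub is clean)
//   NativeJump        -- jumps to the interpreter entry ((address)-1 while
//                        the stub is clean)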

void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
  address stub = find_stub();
  assert(stub != NULL, "stub not found");

  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
                  instruction_address(),
                  callee->name_and_sig_as_C_string());
  }

  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); // creation also verifies the object
  NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());

  assert(method_holder->data() == 0 || method_holder->data() == (intptr_t)callee(), "a) MT-unsafe modification of inline cache");
  assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry, "b) MT-unsafe modification of inline cache");

  // Update the stub first, then swing the call to it, so a thread taking
  // the call never sees a partially initialized stub.
  method_holder->set_data((intptr_t)callee());
  jump->set_jump_destination(entry);

  // Update jump to call
  set_destination_mt_safe(stub);
}


void CompiledStaticCall::set(const StaticCallInfo& info) {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "MT-unsafe call");
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if a cache entry becomes invalid, we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache.
  assert(is_clean(), "do not update a call entry - use clean");

  if (info._to_interpreter) {
    // Call to interpreted code
    set_to_interpreted(info.callee(), info.entry());
  } else {
    if (TraceICs) {
      ResourceMark rm;
      tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_compiled " INTPTR_FORMAT,
                    instruction_address(),
                    info.entry());
    }
    // Call to compiled code
    assert(CodeCache::contains(info.entry()), "wrong entry point");
    if (info.is_profiled()) {
      address stub = CompiledProfile::find_profile_stub(call_instr());
      CompiledProfile::set_up_profiling(call_instr(), stub, info.entry());
    } else {
      set_destination_mt_safe(info.entry());
    }
  }
}


// Compute settings for a CompiledStaticCall. Since we might have to set
// the stub when calling to the interpreter, we need to return arguments.
void CompiledStaticCall::compute_entry(methodHandle m, StaticCallInfo& info, bool is_profiled) {
  nmethod* m_code = m->code();
  info._callee = m;
  info._is_profiled = is_profiled;
  if (m_code != NULL) {
    info._to_interpreter = false;
    info._entry = m_code->verified_entry_point();
  } else {
    // Callee is interpreted code. In any case, entering the interpreter
    // puts a converter frame on the stack to save arguments.
    info._to_interpreter = true;
    info._entry = m()->get_c2i_entry();
  }
}
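
// A clean static call stub is identified by the sentinel values restored in
// set_stub_to_clean() below: 0 in the method holder and (address)-1 in the
// jump. These are the same values the MT-safety asserts in
// set_to_interpreted() above accept.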

void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "MT-unsafe call");
  // Reset stub
  address stub = static_stub->addr();
  assert(stub != NULL, "stub not found");
  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); // creation also verifies the object
  NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
  method_holder->set_data(0);
  jump->set_jump_destination((address)-1);
}


address CompiledStaticCall::find_stub() {
  // Find reloc. information containing this call-site
  RelocIterator iter((nmethod*)NULL, instruction_address());
  while (iter.next()) {
    if (iter.addr() == instruction_address()) {
      switch(iter.type()) {
        case relocInfo::static_call_type:
          return iter.static_call_reloc()->static_stub();
        // We check here for opt_virtual_call_type, since we reuse the code
        // from the CompiledIC implementation
        case relocInfo::opt_virtual_call_type:
          return iter.opt_virtual_call_reloc()->static_stub();
        case relocInfo::virtual_call_type:
          return iter.virtual_call_reloc()->static_stub();
        case relocInfo::poll_type:
        case relocInfo::poll_return_type: // A safepoint can't overlap a call.
        default:
          ShouldNotReachHere();
      }
    }
  }
  return NULL;
}

void CompiledStaticCall::drop_profiling() {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "MT-unsafe call");

  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);

  if (CompiledProfile::is_profiled(call_instr())) {
    // Bypass the profile stub: point the call directly at the real target.
    set_destination_mt_safe(CompiledProfile::profile_target(call_instr()));
  }
}

//-----------------------------------------------------------------------------
// Non-product mode code
#ifndef PRODUCT

void CompiledIC::verify() {
  // Make sure the code pattern is actually a call imm32 instruction.
  _ic_call->verify();
  if (os::is_MP()) {
    _ic_call->verify_alignment();
  }
  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted()
         || is_optimized() || is_megamorphic(), "sanity check");
}


void CompiledIC::print() {
  print_compiled_ic();
  tty->cr();
}
"interpreted " : "", ic_destination()); 795 } 796 797 798 void CompiledStaticCall::print() { 799 tty->print("static call at " INTPTR_FORMAT " -> ", instruction_address()); 800 if (is_clean()) { 801 tty->print("clean"); 802 } else if (is_call_to_compiled()) { 803 tty->print("compiled"); 804 } else if (is_call_to_interpreted()) { 805 tty->print("interpreted"); 806 } 807 tty->cr(); 808 } 809 810 void CompiledStaticCall::verify() { 811 // Verify call 812 NativeCall::verify(); 813 if (os::is_MP()) { 814 verify_alignment(); 815 } 816 817 // Verify stub 818 address stub = find_stub(); 819 assert(stub != NULL, "no stub found for static call"); 820 NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); // creation also verifies the object 821 NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); 822 823 // Verify state 824 assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check"); 825 } 826 827 #endif