/*
 * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "opto/valuetypenode.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/sharedRuntime.hpp"

void trace_type_profile(Compile* C, ciMethod* method, int depth, int bci, ciMethod* prof_method, ciKlass* prof_klass, int site_count, int receiver_count) {
  if (TraceTypeProfile || C->print_inlining()) {
    outputStream* out = tty;
    if (!C->print_inlining()) {
      if (!PrintOpto && !PrintCompilation) {
        method->print_short_name();
        tty->cr();
      }
      CompileTask::print_inlining_tty(prof_method, depth, bci);
    } else {
      out = C->print_inlining_stream();
    }
    CompileTask::print_inline_indent(depth, out);
    out->print(" \\-> TypeProfile (%d/%d counts) = ", receiver_count, site_count);
    stringStream ss;
    prof_klass->name()->print_symbol_on(&ss);
    out->print("%s", ss.as_string());
    out->cr();
  }
}

CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool call_does_dispatch,
                                       JVMState* jvms, bool allow_inline,
                                       float prof_factor, ciKlass* speculative_receiver_type,
                                       bool allow_intrinsics, bool delayed_forbidden) {
  ciMethod*       caller   = jvms->method();
  int             bci      = jvms->bci();
  Bytecodes::Code bytecode = caller->java_code_at_bci(bci);
  guarantee(callee != NULL, "failed method resolution");

  // Dtrace currently doesn't work unless all calls are vanilla
  if (env()->dtrace_method_probes()) {
    allow_inline = false;
  }

  // Note: When we get profiling during stage-1 compiles, we want to pull
  // from more specific profile data which pertains to this inlining.
  // Right now, ignore the information in jvms->caller(), and do method[bci].
  ciCallProfile profile = caller->call_profile_at_bci(bci);

  // See how many times this site has been invoked.
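  // (site_count below is the total number of invocations recorded at this
  // bci; receiver_count is how many of those went to the most frequent
  // receiver class, and stays -1 when there is no usable receiver profile.)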
  int site_count = profile.count();
  int receiver_count = -1;
  if (call_does_dispatch && UseTypeProfile && profile.has_receiver(0)) {
    // Receivers in the profile structure are ordered by call counts
    // so that the most called (major) receiver is profile.receiver(0).
    receiver_count = profile.receiver_count(0);
  }

  CompileLog* log = this->log();
  if (log != NULL) {
    int rid  = (receiver_count >= 0) ? log->identify(profile.receiver(0)) : -1;
    int r2id = (rid != -1 && profile.has_receiver(1)) ? log->identify(profile.receiver(1)) : -1;
    log->begin_elem("call method='%d' count='%d' prof_factor='%f'",
                    log->identify(callee), site_count, prof_factor);
    if (call_does_dispatch)  log->print(" virtual='1'");
    if (allow_inline)        log->print(" inline='1'");
    if (receiver_count >= 0) {
      log->print(" receiver='%d' receiver_count='%d'", rid, receiver_count);
      if (profile.has_receiver(1)) {
        log->print(" receiver2='%d' receiver2_count='%d'", r2id, profile.receiver_count(1));
      }
    }
    if (callee->is_method_handle_intrinsic()) {
      log->print(" method_handle_intrinsic='1'");
    }
    log->end_elem();
  }

  // Special case the handling of certain common, profitable library
  // methods. If these methods are replaced with specialized code,
  // then we return it as the inlined version of the call.
  // We do this before the strict f.p. check below because the
  // intrinsics handle strict f.p. correctly.
  CallGenerator* cg_intrinsic = NULL;
  if (allow_inline && allow_intrinsics) {
    CallGenerator* cg = find_intrinsic(callee, call_does_dispatch);
    if (cg != NULL) {
      if (cg->is_predicated()) {
        // Code without intrinsic but, hopefully, inlined.
        CallGenerator* inline_cg = this->call_generator(callee,
              vtable_index, call_does_dispatch, jvms, allow_inline, prof_factor, speculative_receiver_type, false);
        if (inline_cg != NULL) {
          cg = CallGenerator::for_predicated_intrinsic(cg, inline_cg);
        }
      }

      // If intrinsic does the virtual dispatch, we try to use the type profile
      // first, and hopefully inline it as the regular virtual call below.
      // We will retry the intrinsic if nothing had claimed it afterwards.
      if (cg->does_virtual_dispatch()) {
        cg_intrinsic = cg;
        cg = NULL;
      } else {
        return cg;
      }
    }
  }

  // Do method handle calls.
  // NOTE: This must happen before normal inlining logic below since
  // MethodHandle.invoke* are native methods which obviously don't
  // have bytecodes and so normal inlining fails.
  if (callee->is_method_handle_intrinsic()) {
    CallGenerator* cg = CallGenerator::for_method_handle_call(jvms, caller, callee, delayed_forbidden);
    assert(cg == NULL || !delayed_forbidden || !cg->is_late_inline() || cg->is_mh_late_inline(), "unexpected CallGenerator");
    return cg;
  }

  // Do not inline strict fp into non-strict code, or the reverse
  if (caller->is_strict() ^ callee->is_strict()) {
    allow_inline = false;
  }

  // Attempt to inline...
  if (allow_inline) {
    // The profile data is only partly attributable to this caller,
    // scale back the call site information.
    float past_uses = jvms->method()->scale_count(site_count, prof_factor);
    // This is the number of times we expect the call code to be used.
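    // (scale_count() applies prof_factor so that only the share of the
    // profile attributable to this particular inlining path is counted;
    // past_uses then doubles as the estimate of future uses.)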
    float expected_uses = past_uses;

    // Try inlining a bytecoded method:
    if (!call_does_dispatch) {
      InlineTree* ilt = InlineTree::find_subtree_from_root(this->ilt(), jvms->caller(), jvms->method());
      WarmCallInfo scratch_ci;
      bool should_delay = false;
      WarmCallInfo* ci = ilt->ok_to_inline(callee, jvms, profile, &scratch_ci, should_delay);
      assert(ci != &scratch_ci, "do not let this pointer escape");
      bool allow_inline   = (ci != NULL && !ci->is_cold());
      bool require_inline = (allow_inline && ci->is_hot());

      if (allow_inline) {
        CallGenerator* cg = CallGenerator::for_inline(callee, expected_uses);

        if (require_inline && cg != NULL) {
          // Delay the inlining of this method to give us the
          // opportunity to perform some high level optimizations
          // first.
          if (should_delay_string_inlining(callee, jvms)) {
            assert(!delayed_forbidden, "strange");
            return CallGenerator::for_string_late_inline(callee, cg);
          } else if (should_delay_boxing_inlining(callee, jvms)) {
            assert(!delayed_forbidden, "strange");
            return CallGenerator::for_boxing_late_inline(callee, cg);
          } else if ((should_delay || AlwaysIncrementalInline) && !delayed_forbidden) {
            return CallGenerator::for_late_inline(callee, cg);
          }
        }
        if (cg == NULL || should_delay) {
          // Fall through.
        } else if (require_inline || !InlineWarmCalls) {
          return cg;
        } else {
          CallGenerator* cold_cg = call_generator(callee, vtable_index, call_does_dispatch, jvms, false, prof_factor);
          return CallGenerator::for_warm_call(ci, cold_cg, cg);
        }
      }
    }

    // Try using the type profile.
    if (call_does_dispatch && site_count > 0 && receiver_count > 0) {
      // The major receiver's count >= TypeProfileMajorReceiverPercent of site_count.
      bool have_major_receiver = (100.*profile.receiver_prob(0) >= (float)TypeProfileMajorReceiverPercent);
      ciMethod* receiver_method = NULL;

      int morphism = profile.morphism();
      if (speculative_receiver_type != NULL) {
        if (!too_many_traps(caller, bci, Deoptimization::Reason_speculate_class_check)) {
          // We have a speculative type, we should be able to resolve
          // the call. We do that before looking at the profiling at
          // this invoke because it may lead to bimorphic inlining which
          // a speculative type should help us avoid.
          receiver_method = callee->resolve_invoke(jvms->method()->holder(),
                                                   speculative_receiver_type);
          if (receiver_method == NULL) {
            speculative_receiver_type = NULL;
          } else {
            morphism = 1;
          }
        } else {
          // Speculation failed before. Use profiling at the call
          // (could allow bimorphic inlining for instance).
          speculative_receiver_type = NULL;
        }
      }
      if (receiver_method == NULL &&
          (have_major_receiver || morphism == 1 ||
           (morphism == 2 && UseBimorphicInlining))) {
        // receiver_method = profile.method();
        // Profiles do not suggest methods now. Look it up in the major receiver.
        receiver_method = callee->resolve_invoke(jvms->method()->holder(),
                                                 profile.receiver(0));
      }
      if (receiver_method != NULL) {
        // The single majority receiver sufficiently outweighs the minority.
        CallGenerator* hit_cg = this->call_generator(receiver_method,
              vtable_index, !call_does_dispatch, jvms, allow_inline, prof_factor);
        if (hit_cg != NULL) {
          // Look up second receiver.
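          // If the profile is bimorphic (exactly two receiver classes seen),
          // the site can still be devirtualized with two guards instead of
          // one. Illustrative shape of the code generated below:
          //
          //   if (recv->klass == K1)      { ... inlined K1 target ... }
          //   else if (recv->klass == K2) { ... inlined K2 target ... }
          //   else                        { uncommon trap or virtual call }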
          CallGenerator* next_hit_cg = NULL;
          ciMethod* next_receiver_method = NULL;
          if (morphism == 2 && UseBimorphicInlining) {
            next_receiver_method = callee->resolve_invoke(jvms->method()->holder(),
                                                          profile.receiver(1));
            if (next_receiver_method != NULL) {
              next_hit_cg = this->call_generator(next_receiver_method,
                                vtable_index, !call_does_dispatch, jvms,
                                allow_inline, prof_factor);
              if (next_hit_cg != NULL && !next_hit_cg->is_inline() &&
                  have_major_receiver && UseOnlyInlinedBimorphic) {
                // Skip if we can't inline second receiver's method
                next_hit_cg = NULL;
              }
            }
          }
          CallGenerator* miss_cg;
          Deoptimization::DeoptReason reason = morphism == 2 ?
            Deoptimization::Reason_bimorphic : Deoptimization::reason_class_check(speculative_receiver_type != NULL);
          if ((morphism == 1 || (morphism == 2 && next_hit_cg != NULL)) &&
              !too_many_traps(caller, bci, reason)
             ) {
            // Generate uncommon trap for class check failure path
            // in case of monomorphic or bimorphic virtual call site.
            miss_cg = CallGenerator::for_uncommon_trap(callee, reason,
                        Deoptimization::Action_maybe_recompile);
          } else {
            // Generate virtual call for class check failure path
            // in case of polymorphic virtual call site.
            miss_cg = CallGenerator::for_virtual_call(callee, vtable_index);
          }
          if (miss_cg != NULL) {
            if (next_hit_cg != NULL) {
              assert(speculative_receiver_type == NULL, "shouldn't end up here if we used speculation");
              trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1));
              // We don't need to record dependency on a receiver here and below.
              // Whenever we inline, the dependency is added by Parse::Parse().
              miss_cg = CallGenerator::for_predicted_call(profile.receiver(1), miss_cg, next_hit_cg, PROB_MAX);
            }
            if (miss_cg != NULL) {
              trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), receiver_method, profile.receiver(0), site_count, receiver_count);
              ciKlass* k = speculative_receiver_type != NULL ? speculative_receiver_type : profile.receiver(0);
              float hit_prob = speculative_receiver_type != NULL ? 1.0 : profile.receiver_prob(0);
              CallGenerator* cg = CallGenerator::for_predicted_call(k, miss_cg, hit_cg, hit_prob);
              if (cg != NULL)  return cg;
            }
          }
        }
      }
    }
  }

  // Nothing claimed the intrinsic, we go with straight-forward inlining
  // for already discovered intrinsic.
  if (allow_inline && allow_intrinsics && cg_intrinsic != NULL) {
    assert(cg_intrinsic->does_virtual_dispatch(), "sanity");
    return cg_intrinsic;
  }

  // There was no special inlining tactic, or it bailed out.
  // Use a more generic tactic, like a simple call.
  if (call_does_dispatch) {
    const char* msg = "virtual call";
    if (PrintInlining)  print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
    C->log_inline_failure(msg);
    return CallGenerator::for_virtual_call(callee, vtable_index);
  } else {
    // Class Hierarchy Analysis or Type Profile reveals a unique target,
    // or it is a static or special call.
    return CallGenerator::for_direct_call(callee, should_delay_inlining(callee, jvms));
  }
}

// Return true for methods that shouldn't be inlined early so that
// they are easier to analyze and optimize as intrinsics.
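// (Delaying matters because PhaseStringOpts pattern-matches whole
// StringBuilder/StringBuffer call chains; inlining the individual
// append/toString calls first would destroy the pattern it looks for.)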
bool Compile::should_delay_string_inlining(ciMethod* call_method, JVMState* jvms) {
  if (has_stringbuilder()) {

    if ((call_method->holder() == C->env()->StringBuilder_klass() ||
         call_method->holder() == C->env()->StringBuffer_klass()) &&
        (jvms->method()->holder() == C->env()->StringBuilder_klass() ||
         jvms->method()->holder() == C->env()->StringBuffer_klass())) {
      // Delay SB calls only when called from non-SB code
      return false;
    }

    switch (call_method->intrinsic_id()) {
      case vmIntrinsics::_StringBuilder_void:
      case vmIntrinsics::_StringBuilder_int:
      case vmIntrinsics::_StringBuilder_String:
      case vmIntrinsics::_StringBuilder_append_char:
      case vmIntrinsics::_StringBuilder_append_int:
      case vmIntrinsics::_StringBuilder_append_String:
      case vmIntrinsics::_StringBuilder_toString:
      case vmIntrinsics::_StringBuffer_void:
      case vmIntrinsics::_StringBuffer_int:
      case vmIntrinsics::_StringBuffer_String:
      case vmIntrinsics::_StringBuffer_append_char:
      case vmIntrinsics::_StringBuffer_append_int:
      case vmIntrinsics::_StringBuffer_append_String:
      case vmIntrinsics::_StringBuffer_toString:
      case vmIntrinsics::_Integer_toString:
        return true;

      case vmIntrinsics::_String_String:
        {
          Node* receiver = jvms->map()->in(jvms->argoff() + 1);
          if (receiver->is_Proj() && receiver->in(0)->is_CallStaticJava()) {
            CallStaticJavaNode* csj = receiver->in(0)->as_CallStaticJava();
            ciMethod* m = csj->method();
            if (m != NULL &&
                (m->intrinsic_id() == vmIntrinsics::_StringBuffer_toString ||
                 m->intrinsic_id() == vmIntrinsics::_StringBuilder_toString)) {
              // Delay String.<init>(new SB())
              return true;
            }
          }
          return false;
        }

      default:
        return false;
    }
  }
  return false;
}

bool Compile::should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms) {
  if (eliminate_boxing() && call_method->is_boxing_method()) {
    set_has_boxed_value(true);
    return aggressive_unboxing();
  }
  return false;
}

// uncommon-trap call-sites where callee is unloaded, uninitialized or will not link
bool Parse::can_not_compile_call_site(ciMethod* dest_method, ciInstanceKlass* klass) {
  // Additional inputs to consider...
  // bc     = bc()
  // caller = method()
  // iter().get_method_holder_index()
  assert(dest_method->is_loaded(), "ciTypeFlow should not let us get here");
  // Interface classes can be loaded & linked and never get around to
  // being initialized. Uncommon-trap for not-initialized static or
  // v-calls. Let interface calls happen.
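  // In other words: a call whose holder class is neither initialized nor
  // currently being initialized cannot be compiled yet; trap back to the
  // interpreter, which will trigger the initialization. Interface holders
  // are exempt, since an interface may legitimately never be initialized.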
  ciInstanceKlass* holder_klass = dest_method->holder();
  if (!holder_klass->is_being_initialized() &&
      !holder_klass->is_initialized() &&
      !holder_klass->is_interface()) {
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  holder_klass);
    return true;
  }

  assert(dest_method->is_loaded(), "dest_method: typeflow responsibility");
  return false;
}

#ifdef ASSERT
static bool check_call_consistency(JVMState* jvms, CallGenerator* cg) {
  ciMethod* symbolic_info = jvms->method()->get_method_at_bci(jvms->bci());
  ciMethod* resolved_method = cg->method();
  if (!ciMethod::is_consistent_info(symbolic_info, resolved_method)) {
    tty->print_cr("JVMS:");
    jvms->dump();
    tty->print_cr("Bytecode info:");
    jvms->method()->get_method_at_bci(jvms->bci())->print(); tty->cr();
    tty->print_cr("Resolved method:");
    cg->method()->print(); tty->cr();
    return false;
  }
  return true;
}
#endif // ASSERT

//------------------------------do_call----------------------------------------
// Handle your basic call. Inline if we can & want to, else just setup call.
void Parse::do_call() {
  // It's likely we are going to add debug info soon.
  // Also, if we inline a guy who eventually needs debug info for this JVMS,
  // our contribution to it is cleaned up right here.
  kill_dead_locals();

  C->print_inlining_assert_ready();

  // Set frequently used booleans
  const bool is_virtual = bc() == Bytecodes::_invokevirtual;
  const bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface;
  const bool has_receiver = Bytecodes::has_receiver(bc());

  // Find target being called
  bool             will_link;
  ciSignature*     declared_signature = NULL;
  ciMethod*        orig_callee = iter().get_method(will_link, &declared_signature);  // callee in the bytecode
  ciInstanceKlass* holder_klass = orig_callee->holder();
  ciKlass*         holder = iter().get_declared_method_holder();
  ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);
  assert(declared_signature != NULL, "cannot be null");

  // Bump max node limit for JSR292 users
  if (bc() == Bytecodes::_invokedynamic || orig_callee->is_method_handle_intrinsic()) {
    C->set_max_node_limit(3*MaxNodeLimit);
  }

  // uncommon-trap when callee is unloaded, uninitialized or will not link
  // bailout when too many arguments for register representation
  if (!will_link || can_not_compile_call_site(orig_callee, klass)) {
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" can not compile call at bci %d to:", bci());
      orig_callee->print_name(); tty->cr();
    }
    return;
  }
  assert(holder_klass->is_loaded(), "");
  //assert((bc_callee->is_static() || is_invokedynamic) == !has_receiver , "must match bc");  // XXX invokehandle (cur_bc_raw)
  // Note: this takes into account invokeinterface of methods declared in java/lang/Object,
  // which should be invokevirtuals but according to the VM spec may be invokeinterfaces
  assert(holder_klass->is_interface() || holder_klass->super() == NULL || (bc() != Bytecodes::_invokeinterface), "must match bc");
  // Note: In the absence of miranda methods, an abstract class K can perform
  // an invokevirtual directly on an interface method I.m if K implements I.
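  // (Illustrative Java shape of the note above:
  //    interface I { void m(); }
  //    abstract class K implements I { }
  //  a call x.m() on a receiver statically typed as K may compile as an
  //  invokevirtual against K even though m is declared only in I.)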

  // orig_callee is the resolved callee whose signature includes the
  // appendix argument.
  const int nargs = orig_callee->arg_size();
  const bool is_signature_polymorphic = MethodHandles::is_signature_polymorphic(orig_callee->intrinsic_id());

  // Push appendix argument (MethodType, CallSite, etc.), if one.
  if (iter().has_appendix()) {
    ciObject* appendix_arg = iter().get_appendix();
    const TypeOopPtr* appendix_arg_type = TypeOopPtr::make_from_constant(appendix_arg);
    Node* appendix_arg_node = _gvn.makecon(appendix_arg_type);
    push(appendix_arg_node);
  }

  // ---------------------
  // Does Class Hierarchy Analysis reveal only a single target of a v-call?
  // Then we may inline or make a static call, but become dependent on there being only 1 target.
  // Does the call-site type profile reveal only one receiver?
  // Then we may introduce a run-time check and inline on the path where it succeeds.
  // The other path may uncommon_trap, check for another receiver, or do a v-call.

  // Try to get the most accurate receiver type
  ciMethod* callee             = orig_callee;
  int       vtable_index       = Method::invalid_vtable_index;
  bool      call_does_dispatch = false;

  // Speculative type of the receiver if any
  ciKlass* speculative_receiver_type = NULL;
  if (is_virtual_or_interface) {
    Node* receiver_node = stack(sp() - nargs);
    const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr();
    // call_does_dispatch and vtable_index are out-parameters. They might be changed.
    // For arrays, klass below is Object. When vtable calls are used,
    // resolving the call with Object would allow an illegal call to
    // finalize() on an array. We use holder instead: illegal calls to
    // finalize() won't be compiled as vtable calls (IC call
    // resolution will catch the illegal call) and the few legal calls
    // on array types won't be either.
    callee = C->optimize_virtual_call(method(), bci(), klass, holder, orig_callee,
                                      receiver_type, is_virtual,
                                      call_does_dispatch, vtable_index);  // out-parameters
    speculative_receiver_type = receiver_type != NULL ? receiver_type->speculative_type() : NULL;
  }

  // invoke-super-special
  if (iter().cur_bc_raw() == Bytecodes::_invokespecial && !orig_callee->is_object_initializer()) {
    ciInstanceKlass* calling_klass = method()->holder();
    ciInstanceKlass* sender_klass =
      calling_klass->is_anonymous() ? calling_klass->host_klass() :
                                      calling_klass;
    if (sender_klass->is_interface()) {
      Node* receiver_node = stack(sp() - nargs);
      Node* cls_node = makecon(TypeKlassPtr::make(sender_klass));
      Node* bad_type_ctrl = NULL;
      Node* casted_receiver = gen_checkcast(receiver_node, cls_node, &bad_type_ctrl);
      if (bad_type_ctrl != NULL) {
        PreserveJVMState pjvms(this);
        set_control(bad_type_ctrl);
        uncommon_trap(Deoptimization::Reason_class_check,
                      Deoptimization::Action_none);
      }
      if (stopped()) {
        return;  // MUST uncommon-trap?
      }
      set_stack(sp() - nargs, casted_receiver);
    }
  }

  // Note: It's OK to try to inline a virtual call.
  // The call generator will not attempt to inline a polymorphic call
  // unless it knows how to optimize the receiver dispatch.
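  // (Devirtualizing the receiver dispatch is the job of
  // Compile::call_generator above: either CHA proves a single target, or
  // the type profile is mono-/bimorphic enough to guard and inline.)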
  bool try_inline = (C->do_inlining() || InlineAccessors);

  // ---------------------
  dec_sp(nargs);              // Temporarily pop args for JVM state of call
  JVMState* jvms = sync_jvms();

  // ---------------------
  // Decide call tactic.
  // This call checks with CHA, the interpreter profile, intrinsics table, etc.
  // It decides whether inlining is desirable or not.
  CallGenerator* cg = C->call_generator(callee, vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type);

  // NOTE: Don't use orig_callee and callee after this point! Use cg->method() instead.
  orig_callee = callee = NULL;

  // ---------------------
  // Round double arguments before call
  round_double_arguments(cg->method());

  // Feed profiling data for arguments to the type system so it can
  // propagate it as speculative types
  record_profiled_arguments_for_speculation(cg->method(), bc());

#ifndef PRODUCT
  // bump global counters for calls
  count_compiled_calls(/*at_method_entry*/ false, cg->is_inline());

  // Record first part of parsing work for this call
  parse_histogram()->record_change();
#endif // not PRODUCT

  assert(jvms == this->jvms(), "still operating on the right JVMS");
  assert(jvms_in_sync(),       "jvms must carry full info into CG");

  // save across call, for a subsequent cast_not_null.
  Node* receiver = has_receiver ? argument(0) : NULL;

  // The extra CheckCastPP for speculative types messes with PhaseStringOpts
  if (receiver != NULL && !call_does_dispatch && !cg->is_string_late_inline()) {
    // Feed profiling data for a single receiver to the type system so
    // it can propagate it as a speculative type
    receiver = record_profiled_receiver_for_speculation(receiver);
  }

  // Bump method data counters (We profile *before* the call is made
  // because exceptions don't return to the call site.)
  profile_call(receiver);

  JVMState* new_jvms = cg->generate(jvms);
  if (new_jvms == NULL) {
    // When inlining attempt fails (e.g., too many arguments),
    // it may contaminate the current compile state, making it
    // impossible to pull back and try again. Once we call
    // cg->generate(), we are committed. If it fails, the whole
    // compilation task is compromised.
    if (failing())  return;

    // This can happen if a library intrinsic is available, but refuses
    // the call site, perhaps because it did not match a pattern the
    // intrinsic was expecting to optimize. Should always be possible to
    // get a normal java call that may inline in that case
    cg = C->call_generator(cg->method(), vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type, /* allow_intrinsics= */ false);
    new_jvms = cg->generate(jvms);
    if (new_jvms == NULL) {
      guarantee(failing(), "call failed to generate: calls should work");
      return;
    }
  }

  if (cg->is_inline()) {
    // Accumulate has_loops estimate
    C->set_has_loops(C->has_loops() || cg->method()->has_loops());
    C->env()->notice_inlined_method(cg->method());
  }

  // Reset parser state from [new_]jvms, which now carries results of the call.
  // Return value (if any) is already pushed on the stack by the cg.
  add_exception_states_from(new_jvms);
  if (new_jvms->map()->control() == top()) {
    stop_and_kill_map();
  } else {
    assert(new_jvms->same_calls_as(jvms), "method/bci left unchanged");
    set_jvms(new_jvms);
  }

  assert(check_call_consistency(jvms, cg), "inconsistent info");

  if (!stopped()) {
    // This was some sort of virtual call, which did a null check for us.
    // Now we can assert receiver-not-null, on the normal return path.
    if (receiver != NULL && cg->is_virtual()) {
      Node* cast = cast_not_null(receiver);
      // %%% assert(receiver == cast, "should already have cast the receiver");
    }

    // Round double result after a call from strict to non-strict code
    round_double_result(cg->method());

    ciType* rtype = cg->method()->return_type();
    ciType* ctype = declared_signature->return_type();

    if (Bytecodes::has_optional_appendix(iter().cur_bc_raw()) || is_signature_polymorphic) {
      // Be careful here with return types.
      if (ctype != rtype) {
        BasicType rt = rtype->basic_type();
        BasicType ct = ctype->basic_type();
        if (ct == T_VOID) {
          // It's OK for a method to return a value that is discarded.
          // The discarding does not require any special action from the caller.
          // The Java code knows this, at VerifyType.isNullConversion.
          pop_node(rt);  // whatever it was, pop it
        } else if (rt == T_INT || is_subword_type(rt)) {
          // Nothing. These cases are handled in lambda form bytecode.
          assert(ct == T_INT || is_subword_type(ct), "must match: rt=%s, ct=%s", type2name(rt), type2name(ct));
        } else if (rt == T_OBJECT || rt == T_ARRAY) {
          assert(ct == T_OBJECT || ct == T_ARRAY, "rt=%s, ct=%s", type2name(rt), type2name(ct));
          if (ctype->is_loaded()) {
            const TypeOopPtr* arg_type = TypeOopPtr::make_from_klass(rtype->as_klass());
            const Type*       sig_type = TypeOopPtr::make_from_klass(ctype->as_klass());
            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
              Node* retnode = pop();
              Node* cast_obj = _gvn.transform(new CheckCastPPNode(control(), retnode, sig_type));
              push(cast_obj);
            }
          }
        } else if (rt == T_VALUETYPE) {
          assert(ct == T_VALUETYPE, "value type expected but got rt=%s, ct=%s", type2name(rt), type2name(ct));
          if (rtype == C->env()->___Value_klass()) {
            const Type* sig_type = TypeOopPtr::make_from_klass(ctype->as_klass());
            Node* retnode = pop();
            Node* cast = _gvn.transform(new CheckCastPPNode(control(), retnode, sig_type));
            Node* vt = ValueTypeNode::make(_gvn, merged_memory(), cast);
            push(vt);
          } else {
            assert(ctype == C->env()->___Value_klass(), "unexpected value type klass");
            Node* retnode = pop();
            assert(retnode->is_ValueType(), "inconsistent");
            retnode = retnode->as_ValueType()->store_to_memory(this);
            push(retnode);
          }
        } else {
          assert(rt == ct, "unexpected mismatch: rt=%s, ct=%s", type2name(rt), type2name(ct));
          // push a zero; it's better than getting an oop/int mismatch
          pop_node(rt);
          Node* retnode = zerocon(ct);
          push_node(ct, retnode);
        }
        // Now that the value is well-behaved, continue with the call-site type.
        rtype = ctype;
      }
    } else {
      // Symbolic resolution enforces the types to be the same.
      // NOTE: We must relax the assert for unloaded types because two
      // different ciType instances of the same unloaded class type
      // can appear to be "loaded" by different loaders (depending on
      // the accessing class).
      assert(!rtype->is_loaded() || !ctype->is_loaded() || rtype == ctype,
             "mismatched return types: rtype=%s, ctype=%s", rtype->name(), ctype->name());
    }

    // If the return type of the method is not loaded, assert that the
    // value we got is a null. Otherwise, we need to recompile.
    if (!rtype->is_loaded()) {
      if (PrintOpto && (Verbose || WizardMode)) {
        method()->print_name(); tty->print_cr(" asserting nullness of result at bci: %d", bci());
        cg->method()->print_name(); tty->cr();
      }
      if (C->log() != NULL) {
        C->log()->elem("assert_null reason='return' klass='%d'",
                       C->log()->identify(rtype));
      }
      // If there is going to be a trap, put it at the next bytecode:
      set_bci(iter().next_bci());
      null_assert(peek());
      set_bci(iter().cur_bci());  // put it back
    }
    BasicType ct = ctype->basic_type();
    if (ct == T_OBJECT || ct == T_ARRAY) {
      record_profiled_return_for_speculation();
    }
  }

  // Restart record of parsing work after possible inlining of call
#ifndef PRODUCT
  parse_histogram()->set_initial_state(bc());
#endif
}

//---------------------------catch_call_exceptions-----------------------------
// Put Catch and CatchProj nodes behind a just-created call.
// Send their caught exceptions to the proper handler.
// This may be used after a call to the rethrow VM stub,
// when it is needed to process unloaded exception classes.
void Parse::catch_call_exceptions(ciExceptionHandlerStream& handlers) {
  // Exceptions are delivered through this channel:
  Node* i_o = this->i_o();

  // Add a CatchNode.
  GrowableArray<int>* bcis = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, -1);
  GrowableArray<const Type*>* extypes = new (C->node_arena()) GrowableArray<const Type*>(C->node_arena(), 8, 0, NULL);
  GrowableArray<int>* saw_unloaded = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, 0);

  for (; !handlers.is_done(); handlers.next()) {
    ciExceptionHandler* h       = handlers.handler();
    int                 h_bci   = h->handler_bci();
    ciInstanceKlass*    h_klass = h->is_catch_all() ? env()->Throwable_klass() : h->catch_klass();
    // Do not introduce unloaded exception types into the graph:
    if (!h_klass->is_loaded()) {
      if (saw_unloaded->contains(h_bci)) {
        // We've already seen an unloaded exception with h_bci, so
        // don't duplicate. Duplication will cause the CatchNode to be
        // unnecessarily large. See 4713716.
        continue;
      } else {
        saw_unloaded->append(h_bci);
      }
    }
    const Type* h_extype = TypeOopPtr::make_from_klass(h_klass);
    // (We use make_from_klass because it respects UseUniqueSubclasses.)
    h_extype = h_extype->join(TypeInstPtr::NOTNULL);
    assert(!h_extype->empty(), "sanity");
    // Note: It's OK if the BCIs repeat themselves.
    bcis->append(h_bci);
    extypes->append(h_extype);
  }

  int len = bcis->length();
  CatchNode* cn = new CatchNode(control(), i_o, len+1);
  Node* catch_ = _gvn.transform(cn);

  // now branch with the exception state to each of the (potential)
  // handlers
  for (int i = 0; i < len; i++) {
    // Setup JVM state to enter the handler.
    PreserveJVMState pjvms(this);
    // Locals are just copied from before the call.
    // Get control from the CatchNode.
    int handler_bci = bcis->at(i);
    Node* ctrl = _gvn.transform(new CatchProjNode(catch_, i+1, handler_bci));
    // This handler cannot happen?
    if (ctrl == top())  continue;
    set_control(ctrl);

    // Create exception oop
    const TypeInstPtr* extype = extypes->at(i)->is_instptr();
    Node* ex_oop = _gvn.transform(new CreateExNode(extypes->at(i), ctrl, i_o));

    // Handle unloaded exception classes.
    if (saw_unloaded->contains(handler_bci)) {
      // An unloaded exception type is coming here. Do an uncommon trap.
#ifndef PRODUCT
      // We do not expect the same handler bci to take both cold unloaded
      // and hot loaded exceptions. But, watch for it.
      if ((Verbose || WizardMode) && extype->is_loaded()) {
        tty->print("Warning: Handler @%d takes mixed loaded/unloaded exceptions in ", bci());
        method()->print_name(); tty->cr();
      } else if (PrintOpto && (Verbose || WizardMode)) {
        tty->print("Bailing out on unloaded exception type ");
        extype->klass()->print_name();
        tty->print(" at bci:%d in ", bci());
        method()->print_name(); tty->cr();
      }
#endif
      // Emit an uncommon trap instead of processing the block.
      set_bci(handler_bci);
      push_ex_oop(ex_oop);
      uncommon_trap(Deoptimization::Reason_unloaded,
                    Deoptimization::Action_reinterpret,
                    extype->klass(), "!loaded exception");
      set_bci(iter().cur_bci());  // put it back
      continue;
    }

    // go to the exception handler
    if (handler_bci < 0) {      // merge with corresponding rethrow node
      throw_to_exit(make_exception_state(ex_oop));
    } else {                    // Else jump to corresponding handler
      push_ex_oop(ex_oop);      // Clear stack and push just the oop.
      merge_exception(handler_bci);
    }
  }

  // The first CatchProj is for the normal return.
  // (Note: If this is a call to rethrow_Java, this node goes dead.)
  set_control(_gvn.transform(new CatchProjNode(catch_, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci)));
}


//----------------------------catch_inline_exceptions--------------------------
// Handle all exceptions thrown by an inlined method or individual bytecode.
// Common case 1: we have no handler, so all exceptions merge right into
// the rethrow case.
// Case 2: we have some handlers, with loaded exception klasses that have
// no subklasses. We do a Deutsch-Shiffman style type-check on the incoming
// exception oop and branch to the handler directly.
// Case 3: We have some handlers with subklasses or that are not loaded at
// compile-time. We have to call the runtime to resolve the exception.
// So we insert a RethrowCall and all the logic that goes with it.
void Parse::catch_inline_exceptions(SafePointNode* ex_map) {
  // Caller is responsible for saving away the map for normal control flow!
  assert(stopped(), "call set_map(NULL) first");
  assert(method()->has_exception_handlers(), "don't come here w/o work to do");

  Node* ex_node = saved_ex_oop(ex_map);
  if (ex_node == top()) {
    // No action needed.
    return;
  }
  const TypeInstPtr* ex_type = _gvn.type(ex_node)->isa_instptr();
  NOT_PRODUCT(if (ex_type == NULL) tty->print_cr("*** Exception not InstPtr"));
  if (ex_type == NULL)
    ex_type = TypeOopPtr::make_from_klass(env()->Throwable_klass())->is_instptr();

  // determine potential exception handlers
  ciExceptionHandlerStream handlers(method(), bci(),
                                    ex_type->klass()->as_instance_klass(),
                                    ex_type->klass_is_exact());

  // Start executing from the given throw state. (Keep its stack, for now.)
  // Get the exception oop as known at compile time.
  ex_node = use_exception_state(ex_map);

  // Get the exception oop klass from its header
  Node* ex_klass_node = NULL;
  if (has_ex_handler() && !ex_type->klass_is_exact()) {
    Node* p = basic_plus_adr(ex_node, ex_node, oopDesc::klass_offset_in_bytes());
    ex_klass_node = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT));

    // Compute the exception klass a little more cleverly.
    // Obvious solution is to simply do a LoadKlass from the 'ex_node'.
    // However, if the ex_node is a PhiNode, I'm going to do a LoadKlass for
    // each arm of the Phi. If I know something clever about the exceptions
    // I'm loading the class from, I can replace the LoadKlass with the
    // klass constant for the exception oop.
    if (ex_node->is_Phi()) {
      ex_klass_node = new PhiNode(ex_node->in(0), TypeKlassPtr::OBJECT);
      for (uint i = 1; i < ex_node->req(); i++) {
        Node* ex_in = ex_node->in(i);
        if (ex_in == top() || ex_in == NULL) {
          // This path was not taken.
          ex_klass_node->init_req(i, top());
          continue;
        }
        Node* p = basic_plus_adr(ex_in, ex_in, oopDesc::klass_offset_in_bytes());
        Node* k = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT));
        ex_klass_node->init_req(i, k);
      }
      _gvn.set_type(ex_klass_node, TypeKlassPtr::OBJECT);

    }
  }

  // Scan the exception table for applicable handlers.
  // If none, we can call rethrow() and be done!
  // If precise (loaded with no subklasses), insert a D.S. style
  // pointer compare to the correct handler and loop back.
  // If imprecise, switch to the Rethrow VM-call style handling.

  int remaining = handlers.count_remaining();

  // iterate through all entries sequentially
  for (; !handlers.is_done(); handlers.next()) {
    ciExceptionHandler* handler = handlers.handler();

    if (handler->is_rethrow()) {
      // If we fell off the end of the table without finding an imprecise
      // exception klass (and without finding a generic handler) then we
      // know this exception is not handled in this method. We just rethrow
      // the exception into the caller.
      throw_to_exit(make_exception_state(ex_node));
      return;
    }

    // exception handler bci range covers throw_bci => investigate further
    int handler_bci = handler->handler_bci();

    if (remaining == 1) {
      push_ex_oop(ex_node);          // Push exception oop for handler
      if (PrintOpto && WizardMode) {
        tty->print_cr("  Catching every inline exception bci:%d -> handler_bci:%d", bci(), handler_bci);
      }
      merge_exception(handler_bci);  // jump to handler
      return;                        // No more handling to be done here!
    }

    // Get the handler's klass
    ciInstanceKlass* klass = handler->catch_klass();

    if (!klass->is_loaded()) {       // klass is not loaded?
      // fall through into catch_call_exceptions which will emit a
      // handler with an uncommon trap.
      break;
    }

    if (klass->is_interface())       // should not happen, but...
      break;                         // bail out

    // Check the type of the exception against the catch type
    const TypeKlassPtr* tk = TypeKlassPtr::make(klass);
    Node* con = _gvn.makecon(tk);
    Node* not_subtype_ctrl = gen_subtype_check(ex_klass_node, con);
    if (!stopped()) {
      PreserveJVMState pjvms(this);
      const TypeInstPtr* tinst = TypeOopPtr::make_from_klass_unique(klass)->cast_to_ptr_type(TypePtr::NotNull)->is_instptr();
      assert(klass->has_subklass() || tinst->klass_is_exact(), "lost exactness");
      Node* ex_oop = _gvn.transform(new CheckCastPPNode(control(), ex_node, tinst));
      push_ex_oop(ex_oop);           // Push exception oop for handler
      if (PrintOpto && WizardMode) {
        tty->print("  Catching inline exception bci:%d -> handler_bci:%d -- ", bci(), handler_bci);
        klass->print_name();
        tty->cr();
      }
      merge_exception(handler_bci);
    }
    set_control(not_subtype_ctrl);

    // Come here if exception does not match handler.
    // Carry on with more handler checks.
    --remaining;
  }

  assert(!stopped(), "you should return if you finish the chain");

  // Oops, need to call into the VM to resolve the klasses at runtime.
  // Note: This call must not deoptimize, since it is not a real call at this bci!
  kill_dead_locals();

  make_runtime_call(RC_NO_LEAF | RC_MUST_THROW,
                    OptoRuntime::rethrow_Type(),
                    OptoRuntime::rethrow_stub(),
                    NULL, NULL,
                    ex_node);

  // Rethrow is a pure call, no side effects, only a result.
  // The result cannot be allocated, so we use I_O

  // Catch exceptions from the rethrow
  catch_call_exceptions(handlers);
}


// (Note: Moved add_debug_info into GraphKit::add_safepoint_edges.)


#ifndef PRODUCT
void Parse::count_compiled_calls(bool at_method_entry, bool is_inline) {
  if (CountCompiledCalls) {
    if (at_method_entry) {
      // bump invocation counter if top method (for statistics)
      if (CountCompiledCalls && depth() == 1) {
        const TypePtr* addr_type = TypeMetadataPtr::make(method());
        Node* adr1 = makecon(addr_type);
        Node* adr2 = basic_plus_adr(adr1, adr1, in_bytes(Method::compiled_invocation_counter_offset()));
        increment_counter(adr2);
      }
    } else if (is_inline) {
      switch (bc()) {
        case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_inlined_calls_addr()); break;
        case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_inlined_interface_calls_addr()); break;
        case Bytecodes::_invokestatic:
        case Bytecodes::_invokedynamic:
        case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_inlined_static_calls_addr()); break;
        default: fatal("unexpected call bytecode");
      }
    } else {
      switch (bc()) {
        case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_normal_calls_addr()); break;
        case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_interface_calls_addr()); break;
        case Bytecodes::_invokestatic:
        case Bytecodes::_invokedynamic:
        case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_static_calls_addr()); break;
        default: fatal("unexpected call bytecode");
      }
    }
  }
}
#endif //PRODUCT


ciMethod* Compile::optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKlass* klass,
                                         ciKlass* holder, ciMethod* callee,
                                         const TypeOopPtr* receiver_type, bool is_virtual,
                                         bool& call_does_dispatch, int& vtable_index,
                                         bool check_access) {
  // Set default values for out-parameters.
  call_does_dispatch = true;
  vtable_index       = Method::invalid_vtable_index;

  // Choose call strategy.
  ciMethod* optimized_virtual_method = optimize_inlining(caller, bci, klass, callee,
                                                         receiver_type, check_access);

  // Has the call been sufficiently improved such that it is no longer a virtual?
  if (optimized_virtual_method != NULL) {
    callee             = optimized_virtual_method;
    call_does_dispatch = false;
  } else if (!UseInlineCaches && is_virtual && callee->is_loaded()) {
    // We can make a vtable call at this site
    vtable_index = callee->resolve_vtable_index(caller->holder(), holder);
  }
  return callee;
}

// Identify possible target method and inlining style
ciMethod* Compile::optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
                                     ciMethod* callee, const TypeOopPtr* receiver_type,
                                     bool check_access) {
  // only use for virtual or interface calls

  // If it is obviously final, do not bother to call find_monomorphic_target,
  // because the class hierarchy checks are not needed, and may fail due to
  // incompletely loaded classes. Since we do our own class loading checks
  // in this module, we may confidently bind to any method.
  if (callee->can_be_statically_bound()) {
    return callee;
  }

  // Attempt to improve the receiver
  bool actual_receiver_is_exact = false;
  ciInstanceKlass* actual_receiver = klass;
  if (receiver_type != NULL) {
    // Array methods are all inherited from Object, and are monomorphic.
    // finalize() call on array is not allowed.
    if (receiver_type->isa_aryptr() &&
        callee->holder() == env()->Object_klass() &&
        callee->name() != ciSymbol::finalize_method_name()) {
      return callee;
    }

    // All other interesting cases are instance klasses.
    if (!receiver_type->isa_instptr()) {
      return NULL;
    }

    ciInstanceKlass* ikl = receiver_type->klass()->as_instance_klass();
    if (ikl->is_loaded() && ikl->is_initialized() && !ikl->is_interface() &&
        (ikl == actual_receiver || ikl->is_subtype_of(actual_receiver))) {
      // ikl is the same as or a better type than the original actual_receiver,
      // e.g. static receiver from bytecodes.
      actual_receiver = ikl;
      // Is the actual_receiver exact?
      actual_receiver_is_exact = receiver_type->klass_is_exact();
    }
  }

  ciInstanceKlass* calling_klass = caller->holder();
  ciMethod* cha_monomorphic_target = callee->find_monomorphic_target(calling_klass, klass, actual_receiver, check_access);
  if (cha_monomorphic_target != NULL) {
    assert(!cha_monomorphic_target->is_abstract(), "");
    // Look at the method-receiver type. Does it add "too much information"?
    ciKlass*    mr_klass = cha_monomorphic_target->holder();
    const Type* mr_type  = TypeInstPtr::make(TypePtr::BotPTR, mr_klass);
    if (receiver_type == NULL || !receiver_type->higher_equal(mr_type)) {
      // Calling this method would include an implicit cast to its holder.
      // %%% Not yet implemented. Would throw minor asserts at present.
      // %%% The most common wins are already gained by +UseUniqueSubclasses.
      // To fix, put the higher_equal check at the call of this routine,
      // and add a CheckCastPP to the receiver.
      if (TraceDependencies) {
        tty->print_cr("found unique CHA method, but could not cast up");
        tty->print("  method  = ");
        cha_monomorphic_target->print();
        tty->cr();
      }
      if (log() != NULL) {
        log()->elem("missed_CHA_opportunity klass='%d' method='%d'",
                    log()->identify(klass),
                    log()->identify(cha_monomorphic_target));
      }
      cha_monomorphic_target = NULL;
    }
  }
  if (cha_monomorphic_target != NULL) {
    // Hardwiring a virtual.
    // If we inlined because CHA revealed only a single target method,
    // then we are dependent on that target method not getting overridden
    // by dynamic class loading. Be sure to test the "static" receiver
    // dest_method here, as opposed to the actual receiver, which may
    // falsely lead us to believe that the receiver is final or private.
    dependencies()->assert_unique_concrete_method(actual_receiver, cha_monomorphic_target);
    return cha_monomorphic_target;
  }

  // If the type is exact, we can still bind the method w/o a vcall.
  // (This case comes after CHA so we can see how much extra work it does.)
  if (actual_receiver_is_exact) {
    // In case of evolution, there is a dependence on every inlined method, since each
    // such method can be changed when its class is redefined.
    ciMethod* exact_method = callee->resolve_invoke(calling_klass, actual_receiver);
    if (exact_method != NULL) {
      if (PrintOpto) {
        tty->print("  Calling method via exact type @%d --- ", bci);
        exact_method->print_name();
        tty->cr();
      }
      return exact_method;
    }
  }

  return NULL;
}