/*
 * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/cfgnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/sharedRuntime.hpp"

void trace_type_profile(Compile* C, ciMethod *method, int depth, int bci, ciMethod *prof_method, ciKlass *prof_klass, int site_count, int receiver_count) {
  if (TraceTypeProfile || PrintInlining NOT_PRODUCT(|| PrintOptoInlining)) {
    outputStream* out = tty;
    if (!PrintInlining) {
      if (NOT_PRODUCT(!PrintOpto &&) !PrintCompilation) {
        method->print_short_name();
        tty->cr();
      }
      CompileTask::print_inlining(prof_method, depth, bci);
    } else {
      out = C->print_inlining_stream();
    }
    CompileTask::print_inline_indent(depth, out);
    out->print(" \\-> TypeProfile (%d/%d counts) = ", receiver_count, site_count);
    stringStream ss;
    prof_klass->name()->print_symbol_on(&ss);
    out->print(ss.as_string());
    out->cr();
  }
}

CallGenerator* Compile::inline_intrinsic(ciMethod* callee, int vtable_index, bool call_does_dispatch,
                                         JVMState* jvms, bool allow_inline, float prof_factor) {
  CallGenerator* cg = find_intrinsic(callee, call_does_dispatch);
  if (cg != NULL && cg->is_predicted()) {
    // Code without intrinsic but, hopefully, inlined.
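    // (A predicated intrinsic pairs this regular path with the intrinsic
    // fast path; at runtime the intrinsic's predicate picks between them,
    // roughly:
    //
    //    if (predicate)  <intrinsic code>
    //    else            <regular, hopefully inlined, code>
    //
    // The AES crypto intrinsics are one example of this scheme.)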
    CallGenerator* inline_cg = this->call_generator(callee,
          vtable_index, call_does_dispatch, jvms, allow_inline, prof_factor, false);
    if (inline_cg != NULL) {
      cg = CallGenerator::for_predicted_intrinsic(cg, inline_cg);
    }
  }
  return cg;
}

CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool call_does_dispatch,
                                       JVMState* jvms, bool allow_inline,
                                       float prof_factor, bool allow_intrinsics, bool delayed_forbidden) {
  ciMethod*       caller   = jvms->method();
  int             bci      = jvms->bci();
  Bytecodes::Code bytecode = caller->java_code_at_bci(bci);
  guarantee(callee != NULL, "failed method resolution");

  // Dtrace currently doesn't work unless all calls are vanilla
  if (env()->dtrace_method_probes()) {
    allow_inline = false;
  }

  // Note: When we get profiling during stage-1 compiles, we want to pull
  // from more specific profile data which pertains to this inlining.
  // Right now, ignore the information in jvms->caller(), and do method[bci].
  ciCallProfile profile = caller->call_profile_at_bci(bci);

  // See how many times this site has been invoked.
  int site_count = profile.count();
  int receiver_count = -1;
  if (call_does_dispatch && UseTypeProfile && profile.has_receiver(0)) {
    // Receivers in the profile structure are ordered by call counts
    // so that the most called (major) receiver is profile.receiver(0).
    receiver_count = profile.receiver_count(0);
  }

  CompileLog* log = this->log();
  if (log != NULL) {
    int rid  = (receiver_count >= 0)? log->identify(profile.receiver(0)): -1;
    int r2id = (rid != -1 && profile.has_receiver(1))? log->identify(profile.receiver(1)): -1;
    log->begin_elem("call method='%d' count='%d' prof_factor='%g'",
                    log->identify(callee), site_count, prof_factor);
    if (call_does_dispatch)  log->print(" virtual='1'");
    if (allow_inline)        log->print(" inline='1'");
    if (receiver_count >= 0) {
      log->print(" receiver='%d' receiver_count='%d'", rid, receiver_count);
      if (profile.has_receiver(1)) {
        log->print(" receiver2='%d' receiver2_count='%d'", r2id, profile.receiver_count(1));
      }
    }
    log->end_elem();
  }

  // Special case the handling of certain common, profitable library
  // methods.  If these methods are replaced with specialized code,
  // then we return it as the inlined version of the call.
  // We do this before the strict f.p. check below because the
  // intrinsics handle strict f.p. correctly.
  if (allow_inline && allow_intrinsics) {
    CallGenerator* cg = inline_intrinsic(callee,
          vtable_index, call_does_dispatch, jvms, allow_inline, prof_factor);
    // If the intrinsic is marked low priority, skip inlining it until the
    // other inlining mechanisms have had their chance.  We retry it after
    // everything else is tried, below.
    if (cg != NULL && !cg->is_low_priority()) {
      return cg;
    }
  }

  // Do method handle calls.
  // NOTE: This must happen before normal inlining logic below since
  // MethodHandle.invoke* are native methods which obviously don't
  // have bytecodes and so normal inlining fails.
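  // ("Method handle intrinsic" here means the signature-polymorphic
  // linker/invoker methods, e.g. MethodHandle.invokeBasic and the
  // MethodHandle.linkTo* entry points.)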
  if (callee->is_method_handle_intrinsic()) {
    CallGenerator* cg = CallGenerator::for_method_handle_call(jvms, caller, callee, delayed_forbidden);
    assert(cg == NULL || !delayed_forbidden || !cg->is_late_inline() || cg->is_mh_late_inline(), "unexpected CallGenerator");
    return cg;
  }

  // Do not inline strict fp into non-strict code, or the reverse
  if (caller->is_strict() ^ callee->is_strict()) {
    allow_inline = false;
  }

  // Attempt to inline...
  if (allow_inline) {
    // The profile data is only partly attributable to this caller,
    // scale back the call site information.
    float past_uses = jvms->method()->scale_count(site_count, prof_factor);
    // This is the number of times we expect the call code to be used.
    float expected_uses = past_uses;

    // Try inlining a bytecoded method:
    if (!call_does_dispatch) {
      InlineTree* ilt;
      if (UseOldInlining) {
        ilt = InlineTree::find_subtree_from_root(this->ilt(), jvms->caller(), jvms->method());
      } else {
        // Make a disembodied, stateless ILT.
        // TO DO:  When UseOldInlining is removed, copy the ILT code elsewhere.
        float site_invoke_ratio = prof_factor;
        // Note:  ilt is for the root of this parse, not the present call site.
        ilt = new InlineTree(this, jvms->method(), jvms->caller(), site_invoke_ratio, MaxInlineLevel);
      }
      WarmCallInfo scratch_ci;
      if (!UseOldInlining)
        scratch_ci.init(jvms, callee, profile, prof_factor);
      bool should_delay = false;
      WarmCallInfo* ci = ilt->ok_to_inline(callee, jvms, profile, &scratch_ci, should_delay);
      assert(ci != &scratch_ci, "do not let this pointer escape");
      bool allow_inline   = (ci != NULL && !ci->is_cold());
      bool require_inline = (allow_inline && ci->is_hot());

      if (allow_inline) {
        CallGenerator* cg = CallGenerator::for_inline(callee, expected_uses);

        if (require_inline && cg != NULL) {
          // Delay the inlining of this method to give us the
          // opportunity to perform some high level optimizations
          // first.
          if (should_delay_string_inlining(callee, jvms)) {
            assert(!delayed_forbidden, "strange");
            return CallGenerator::for_string_late_inline(callee, cg);
          } else if (should_delay_boxing_inlining(callee, jvms)) {
            assert(!delayed_forbidden, "strange");
            return CallGenerator::for_boxing_late_inline(callee, cg);
          } else if ((should_delay || AlwaysIncrementalInline) && !delayed_forbidden) {
            return CallGenerator::for_late_inline(callee, cg);
          }
        }
        if (cg == NULL || should_delay) {
          // Fall through.
        } else if (require_inline || !InlineWarmCalls) {
          return cg;
        } else {
          CallGenerator* cold_cg = call_generator(callee, vtable_index, call_does_dispatch, jvms, false, prof_factor);
          return CallGenerator::for_warm_call(ci, cold_cg, cg);
        }
      }
    }

    // Try using the type profile.
    if (call_does_dispatch && site_count > 0 && receiver_count > 0) {
      // The major receiver's count >= TypeProfileMajorReceiverPercent of site_count.
      bool have_major_receiver = (100.*profile.receiver_prob(0) >= (float)TypeProfileMajorReceiverPercent);
      ciMethod* receiver_method = NULL;
      if (have_major_receiver || profile.morphism() == 1 ||
          (profile.morphism() == 2 && UseBimorphicInlining)) {
        // receiver_method = profile.method();
        // Profiles do not suggest methods now.  Look it up in the major receiver.
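        // That is, resolve the call as if the receiver were statically typed
        // as the profiled klass; e.g. (hypothetical types) resolving
        // List::get against a profiled ArrayList receiver yields
        // ArrayList::get, which can then be inlined.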
        receiver_method = callee->resolve_invoke(jvms->method()->holder(),
                                                 profile.receiver(0));
      }
      if (receiver_method != NULL) {
        // The single majority receiver sufficiently outweighs the minority.
        CallGenerator* hit_cg = this->call_generator(receiver_method,
              vtable_index, !call_does_dispatch, jvms, allow_inline, prof_factor);
        if (hit_cg != NULL) {
          // Look up second receiver.
          CallGenerator* next_hit_cg = NULL;
          ciMethod* next_receiver_method = NULL;
          if (profile.morphism() == 2 && UseBimorphicInlining) {
            next_receiver_method = callee->resolve_invoke(jvms->method()->holder(),
                                                          profile.receiver(1));
            if (next_receiver_method != NULL) {
              next_hit_cg = this->call_generator(next_receiver_method,
                                  vtable_index, !call_does_dispatch, jvms,
                                  allow_inline, prof_factor);
              if (next_hit_cg != NULL && !next_hit_cg->is_inline() &&
                  have_major_receiver && UseOnlyInlinedBimorphic) {
                  // Skip if we can't inline the second receiver's method
                  next_hit_cg = NULL;
              }
            }
          }
          CallGenerator* miss_cg;
          Deoptimization::DeoptReason reason = (profile.morphism() == 2) ?
                                    Deoptimization::Reason_bimorphic :
                                    Deoptimization::Reason_class_check;
          if ((profile.morphism() == 1 ||
               (profile.morphism() == 2 && next_hit_cg != NULL)) &&
              !too_many_traps(jvms->method(), jvms->bci(), reason)
             ) {
            // Generate uncommon trap for class check failure path
            // in case of monomorphic or bimorphic virtual call site.
            miss_cg = CallGenerator::for_uncommon_trap(callee, reason,
                        Deoptimization::Action_maybe_recompile);
          } else {
            // Generate virtual call for class check failure path
            // in case of polymorphic virtual call site.
            miss_cg = CallGenerator::for_virtual_call(callee, vtable_index);
          }
          if (miss_cg != NULL) {
            if (next_hit_cg != NULL) {
              trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1));
              // We don't need to record dependency on a receiver here and below.
              // Whenever we inline, the dependency is added by Parse::Parse().
              miss_cg = CallGenerator::for_predicted_call(profile.receiver(1), miss_cg, next_hit_cg, PROB_MAX);
            }
            if (miss_cg != NULL) {
              trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), receiver_method, profile.receiver(0), site_count, receiver_count);
              CallGenerator* cg = CallGenerator::for_predicted_call(profile.receiver(0), miss_cg, hit_cg, profile.receiver_prob(0));
              if (cg != NULL)  return cg;
            }
          }
        }
      }
    }
  }

  // Try intrinsics again.  This will inline any low-priority intrinsics,
  // if the code above did not take care of them.
  if (allow_inline && allow_intrinsics) {
    CallGenerator* cg = inline_intrinsic(callee,
          vtable_index, call_does_dispatch, jvms, allow_inline, prof_factor);
    if (cg != NULL) {
      return cg;
    }
  }

  // There was no special inlining tactic, or it bailed out.
  // Use a more generic tactic, like a simple call.
  if (call_does_dispatch) {
    return CallGenerator::for_virtual_call(callee, vtable_index);
  } else {
    // Class Hierarchy Analysis or Type Profile reveals a unique target,
    // or it is a static or special call.
    return CallGenerator::for_direct_call(callee, should_delay_inlining(callee, jvms));
  }
}

// Return true for methods that shouldn't be inlined early so that
// they are easier to analyze and optimize as intrinsics.
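// A typical shape (illustrative only):
//   String s = new StringBuilder().append(a).append(b).toString();
// stays as discrete calls until the string-concatenation optimization has
// had a chance to recognize and fuse the whole chain.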
bool Compile::should_delay_string_inlining(ciMethod* call_method, JVMState* jvms) {
  if (has_stringbuilder()) {

    if ((call_method->holder() == C->env()->StringBuilder_klass() ||
         call_method->holder() == C->env()->StringBuffer_klass()) &&
        (jvms->method()->holder() == C->env()->StringBuilder_klass() ||
         jvms->method()->holder() == C->env()->StringBuffer_klass())) {
      // Delay SB calls only when called from non-SB code
      return false;
    }

    switch (call_method->intrinsic_id()) {
      case vmIntrinsics::_StringBuilder_void:
      case vmIntrinsics::_StringBuilder_int:
      case vmIntrinsics::_StringBuilder_String:
      case vmIntrinsics::_StringBuilder_append_char:
      case vmIntrinsics::_StringBuilder_append_int:
      case vmIntrinsics::_StringBuilder_append_String:
      case vmIntrinsics::_StringBuilder_toString:
      case vmIntrinsics::_StringBuffer_void:
      case vmIntrinsics::_StringBuffer_int:
      case vmIntrinsics::_StringBuffer_String:
      case vmIntrinsics::_StringBuffer_append_char:
      case vmIntrinsics::_StringBuffer_append_int:
      case vmIntrinsics::_StringBuffer_append_String:
      case vmIntrinsics::_StringBuffer_toString:
      case vmIntrinsics::_Integer_toString:
        return true;

      case vmIntrinsics::_String_String:
        {
          Node* receiver = jvms->map()->in(jvms->argoff() + 1);
          if (receiver->is_Proj() && receiver->in(0)->is_CallStaticJava()) {
            CallStaticJavaNode* csj = receiver->in(0)->as_CallStaticJava();
            ciMethod* m = csj->method();
            if (m != NULL &&
                (m->intrinsic_id() == vmIntrinsics::_StringBuffer_toString ||
                 m->intrinsic_id() == vmIntrinsics::_StringBuilder_toString))
              // Delay String.<init>(new SB())
              return true;
          }
          return false;
        }

      default:
        return false;
    }
  }
  return false;
}

bool Compile::should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms) {
  if (eliminate_boxing() && call_method->is_boxing_method()) {
    set_has_boxed_value(true);
    return true;
  }
  return false;
}

// uncommon-trap call-sites where callee is unloaded, uninitialized or will not link
bool Parse::can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass* klass) {
  // Additional inputs to consider...
  // bc      = bc()
  // caller  = method()
  // iter().get_method_holder_index()
  assert( dest_method->is_loaded(), "ciTypeFlow should not let us get here" );
  // Interface classes can be loaded & linked and never get around to
  // being initialized.  Uncommon-trap for not-initialized static or
  // v-calls.  Let interface calls happen.
  ciInstanceKlass* holder_klass = dest_method->holder();
  if (!holder_klass->is_being_initialized() &&
      !holder_klass->is_initialized() &&
      !holder_klass->is_interface()) {
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  holder_klass);
    return true;
  }

  assert(dest_method->is_loaded(), "dest_method: typeflow responsibility");
  return false;
}


//------------------------------do_call----------------------------------------
// Handle your basic call.  Inline if we can & want to, else just setup call.
void Parse::do_call() {
  // It's likely we are going to add debug info soon.
  // Also, if we inline a guy who eventually needs debug info for this JVMS,
  // our contribution to it is cleaned up right here.
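  // (Killing dead locals here keeps them out of the debug info recorded
  // for this call site and for anything inlined beneath it.)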
  kill_dead_locals();

  // Set frequently used booleans
  const bool is_virtual = bc() == Bytecodes::_invokevirtual;
  const bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface;
  const bool has_receiver = Bytecodes::has_receiver(bc());

  // Find target being called
  bool             will_link;
  ciSignature*     declared_signature = NULL;
  ciMethod*        orig_callee  = iter().get_method(will_link, &declared_signature);  // callee in the bytecode
  ciInstanceKlass* holder_klass = orig_callee->holder();
  ciKlass*         holder       = iter().get_declared_method_holder();
  ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);
  assert(declared_signature != NULL, "cannot be null");

  // uncommon-trap when callee is unloaded, uninitialized or will not link
  // bailout when too many arguments for register representation
  if (!will_link || can_not_compile_call_site(orig_callee, klass)) {
#ifndef PRODUCT
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" can not compile call at bci %d to:", bci());
      orig_callee->print_name(); tty->cr();
    }
#endif
    return;
  }
  assert(holder_klass->is_loaded(), "");
  //assert((bc_callee->is_static() || is_invokedynamic) == !has_receiver , "must match bc");  // XXX invokehandle (cur_bc_raw)
  // Note: this takes into account invokeinterface of methods declared in java/lang/Object,
  // which should be invokevirtuals but according to the VM spec may be invokeinterfaces
  assert(holder_klass->is_interface() || holder_klass->super() == NULL || (bc() != Bytecodes::_invokeinterface), "must match bc");
  // Note:  In the absence of miranda methods, an abstract class K can perform
  // an invokevirtual directly on an interface method I.m if K implements I.

  // orig_callee is the resolved callee whose signature includes the
  // appendix argument.
  const int nargs = orig_callee->arg_size();
  const bool is_signature_polymorphic = MethodHandles::is_signature_polymorphic(orig_callee->intrinsic_id());

  // Push appendix argument (MethodType, CallSite, etc.), if one is present.
  if (iter().has_appendix()) {
    ciObject* appendix_arg = iter().get_appendix();
    const TypeOopPtr* appendix_arg_type = TypeOopPtr::make_from_constant(appendix_arg);
    Node* appendix_arg_node = _gvn.makecon(appendix_arg_type);
    push(appendix_arg_node);
  }

  // ---------------------
  // Does Class Hierarchy Analysis reveal only a single target of a v-call?
  // Then we may inline or make a static call, but become dependent on there being only 1 target.
  // Does the call-site type profile reveal only one receiver?
  // Then we may introduce a run-time check and inline on the path where it succeeds.
  // The other path may uncommon_trap, check for another receiver, or do a v-call.

  // Try to get the most accurate receiver type
  ciMethod* callee             = orig_callee;
  int       vtable_index       = Method::invalid_vtable_index;
  bool      call_does_dispatch = false;

  if (is_virtual_or_interface) {
    Node* receiver_node             = stack(sp() - nargs);
    const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr();
    // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
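    // E.g. (hypothetical types) if the receiver is known to be exactly an
    // ArrayList, an invokeinterface of List::get can be strength-reduced
    // to a direct, inlinable call to ArrayList::get.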
    callee = C->optimize_virtual_call(method(), bci(), klass, orig_callee, receiver_type,
                                      is_virtual,
                                      call_does_dispatch, vtable_index);  // out-parameters
  }

  // Note:  It's OK to try to inline a virtual call.
  // The call generator will not attempt to inline a polymorphic call
  // unless it knows how to optimize the receiver dispatch.
  bool try_inline = (C->do_inlining() || InlineAccessors);

  // ---------------------
  dec_sp(nargs);              // Temporarily pop args for JVM state of call
  JVMState* jvms = sync_jvms();

  // ---------------------
  // Decide call tactic.
  // This call checks with CHA, the interpreter profile, intrinsics table, etc.
  // It decides whether inlining is desirable or not.
  CallGenerator* cg = C->call_generator(callee, vtable_index, call_does_dispatch, jvms, try_inline, prof_factor());

  // NOTE:  Don't use orig_callee and callee after this point!  Use cg->method() instead.
  orig_callee = callee = NULL;

  // ---------------------
  // Round double arguments before call
  round_double_arguments(cg->method());

#ifndef PRODUCT
  // bump global counters for calls
  count_compiled_calls(/*at_method_entry*/ false, cg->is_inline());

  // Record first part of parsing work for this call
  parse_histogram()->record_change();
#endif // not PRODUCT

  assert(jvms == this->jvms(), "still operating on the right JVMS");
  assert(jvms_in_sync(),       "jvms must carry full info into CG");

  // save across call, for a subsequent cast_not_null.
  Node* receiver = has_receiver ? argument(0) : NULL;

  // Bump method data counters (We profile *before* the call is made
  // because exceptions don't return to the call site.)
  profile_call(receiver);

  JVMState* new_jvms = cg->generate(jvms);
  if (new_jvms == NULL) {
    // When the inlining attempt fails (e.g., too many arguments),
    // it may contaminate the current compile state, making it
    // impossible to pull back and try again.  Once we call
    // cg->generate(), we are committed.  If it fails, the whole
    // compilation task is compromised.
    if (failing())  return;

    // This can happen if a library intrinsic is available, but refuses
    // the call site, perhaps because it did not match a pattern the
    // intrinsic was expecting to optimize.  It should always be possible
    // to get a normal Java call that may inline in that case.
    cg = C->call_generator(cg->method(), vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), /* allow_intrinsics= */ false);
    if ((new_jvms = cg->generate(jvms)) == NULL) {
      guarantee(failing(), "call failed to generate:  calls should work");
      return;
    }
  }

  if (cg->is_inline()) {
    // Accumulate has_loops estimate
    C->set_has_loops(C->has_loops() || cg->method()->has_loops());
    C->env()->notice_inlined_method(cg->method());
  }

  // Reset parser state from [new_]jvms, which now carries results of the call.
  // Return value (if any) is already pushed on the stack by the cg.
  add_exception_states_from(new_jvms);
  if (new_jvms->map()->control() == top()) {
    stop_and_kill_map();
  } else {
    assert(new_jvms->same_calls_as(jvms), "method/bci left unchanged");
    set_jvms(new_jvms);
  }

  if (!stopped()) {
    // This was some sort of virtual call, which did a null check for us.
    // Now we can assert receiver-not-null, on the normal return path.
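    // E.g. once 'x.foo()' completes normally we know x != null, so later
    // uses of x along this path need no null check of their own.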
    if (receiver != NULL && cg->is_virtual()) {
      Node* cast = cast_not_null(receiver);
      // %%% assert(receiver == cast, "should already have cast the receiver");
    }

    // Round double result after a call from strict to non-strict code
    round_double_result(cg->method());

    ciType* rtype = cg->method()->return_type();
    ciType* ctype = declared_signature->return_type();

    if (Bytecodes::has_optional_appendix(iter().cur_bc_raw()) || is_signature_polymorphic) {
      // Be careful here with return types.
      if (ctype != rtype) {
        BasicType rt = rtype->basic_type();
        BasicType ct = ctype->basic_type();
        if (ct == T_VOID) {
          // It's OK for a method to return a value that is discarded.
          // The discarding does not require any special action from the caller.
          // The Java code knows this, at VerifyType.isNullConversion.
          pop_node(rt);  // whatever it was, pop it
        } else if (rt == T_INT || is_subword_type(rt)) {
          // Nothing.  These cases are handled in lambda form bytecode.
          assert(ct == T_INT || is_subword_type(ct), err_msg_res("must match: rt=%s, ct=%s", type2name(rt), type2name(ct)));
        } else if (rt == T_OBJECT || rt == T_ARRAY) {
          assert(ct == T_OBJECT || ct == T_ARRAY, err_msg_res("rt=%s, ct=%s", type2name(rt), type2name(ct)));
          if (ctype->is_loaded()) {
            const TypeOopPtr* arg_type = TypeOopPtr::make_from_klass(rtype->as_klass());
            const Type*       sig_type = TypeOopPtr::make_from_klass(ctype->as_klass());
            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
              Node* retnode = pop();
              Node* cast_obj = _gvn.transform(new (C) CheckCastPPNode(control(), retnode, sig_type));
              push(cast_obj);
            }
          }
        } else {
          assert(rt == ct, err_msg_res("unexpected mismatch: rt=%s, ct=%s", type2name(rt), type2name(ct)));
          // push a zero; it's better than getting an oop/int mismatch
          pop_node(rt);
          Node* retnode = zerocon(ct);
          push_node(ct, retnode);
        }
        // Now that the value is well-behaved, continue with the call-site type.
        rtype = ctype;
      }
    } else {
      // Symbolic resolution enforces the types to be the same.
      // NOTE: We must relax the assert for unloaded types because two
      // different ciType instances of the same unloaded class type
      // can appear to be "loaded" by different loaders (depending on
      // the accessing class).
      assert(!rtype->is_loaded() || !ctype->is_loaded() || rtype == ctype,
             err_msg_res("mismatched return types: rtype=%s, ctype=%s", rtype->name(), ctype->name()));
    }

    // If the return type of the method is not loaded, assert that the
    // value we got is a null.  Otherwise, we need to recompile.
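    // (If the return type was never loaded, the only value that can legally
    // flow here is null; the null_assert below traps and recompiles if a
    // non-null value ever shows up.)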
    if (!rtype->is_loaded()) {
#ifndef PRODUCT
      if (PrintOpto && (Verbose || WizardMode)) {
        method()->print_name(); tty->print_cr(" asserting nullness of result at bci: %d", bci());
        cg->method()->print_name(); tty->cr();
      }
#endif
      if (C->log() != NULL) {
        C->log()->elem("assert_null reason='return' klass='%d'",
                       C->log()->identify(rtype));
      }
      // If there is going to be a trap, put it at the next bytecode:
      set_bci(iter().next_bci());
      null_assert(peek());
      set_bci(iter().cur_bci()); // put it back
    }
  }

  // Restart record of parsing work after possible inlining of call
#ifndef PRODUCT
  parse_histogram()->set_initial_state(bc());
#endif
}

//---------------------------catch_call_exceptions-----------------------------
// Put Catch and CatchProj nodes behind a just-created call.
// Send their caught exceptions to the proper handler.
// This may be used after a call to the rethrow VM stub,
// when it is needed to process unloaded exception classes.
void Parse::catch_call_exceptions(ciExceptionHandlerStream& handlers) {
  // Exceptions are delivered through this channel:
  Node* i_o = this->i_o();

  // Add a CatchNode.
  GrowableArray<int>* bcis = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, -1);
  GrowableArray<const Type*>* extypes = new (C->node_arena()) GrowableArray<const Type*>(C->node_arena(), 8, 0, NULL);
  GrowableArray<int>* saw_unloaded = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, 0);

  for (; !handlers.is_done(); handlers.next()) {
    ciExceptionHandler* h       = handlers.handler();
    int                 h_bci   = h->handler_bci();
    ciInstanceKlass*    h_klass = h->is_catch_all() ? env()->Throwable_klass() : h->catch_klass();
    // Do not introduce unloaded exception types into the graph:
    if (!h_klass->is_loaded()) {
      if (saw_unloaded->contains(h_bci)) {
        /* We've already seen an unloaded exception with h_bci,
           so don't duplicate.  Duplication will cause the CatchNode to be
           unnecessarily large.  See 4713716. */
        continue;
      } else {
        saw_unloaded->append(h_bci);
      }
    }
    const Type* h_extype = TypeOopPtr::make_from_klass(h_klass);
    // (We use make_from_klass because it respects UseUniqueSubclasses.)
    h_extype = h_extype->join(TypeInstPtr::NOTNULL);
    assert(!h_extype->empty(), "sanity");
    // Note:  It's OK if the BCIs repeat themselves.
    bcis->append(h_bci);
    extypes->append(h_extype);
  }

  int len = bcis->length();
  CatchNode *cn = new (C) CatchNode(control(), i_o, len+1);
  Node *catch_ = _gvn.transform(cn);

  // Now branch with the exception state to each of the (potential)
  // handlers.
  for (int i = 0; i < len; i++) {
    // Setup JVM state to enter the handler.
    PreserveJVMState pjvms(this);
    // Locals are just copied from before the call.
    // Get control from the CatchNode.
    int handler_bci = bcis->at(i);
    Node* ctrl = _gvn.transform( new (C) CatchProjNode(catch_, i+1, handler_bci));
    // This handler cannot happen?
    if (ctrl == top())  continue;
    set_control(ctrl);

    // Create exception oop
    const TypeInstPtr* extype = extypes->at(i)->is_instptr();
    Node *ex_oop = _gvn.transform(new (C) CreateExNode(extypes->at(i), ctrl, i_o));

    // Handle unloaded exception classes.
    if (saw_unloaded->contains(handler_bci)) {
      // An unloaded exception type is coming here.  Do an uncommon trap.
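      // (We cannot emit a type check against a klass we never loaded, so
      // the safe choice is to deoptimize; the interpreter will load the
      // class if and when it actually handles such an exception.)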
#ifndef PRODUCT
      // We do not expect the same handler bci to take both cold unloaded
      // and hot loaded exceptions.  But, watch for it.
      if ((Verbose || WizardMode) && extype->is_loaded()) {
        tty->print("Warning: Handler @%d takes mixed loaded/unloaded exceptions in ", bci());
        method()->print_name(); tty->cr();
      } else if (PrintOpto && (Verbose || WizardMode)) {
        tty->print("Bailing out on unloaded exception type ");
        extype->klass()->print_name();
        tty->print(" at bci:%d in ", bci());
        method()->print_name(); tty->cr();
      }
#endif
      // Emit an uncommon trap instead of processing the block.
      set_bci(handler_bci);
      push_ex_oop(ex_oop);
      uncommon_trap(Deoptimization::Reason_unloaded,
                    Deoptimization::Action_reinterpret,
                    extype->klass(), "!loaded exception");
      set_bci(iter().cur_bci()); // put it back
      continue;
    }

    // go to the exception handler
    if (handler_bci < 0) {      // merge with corresponding rethrow node
      throw_to_exit(make_exception_state(ex_oop));
    } else {                    // Else jump to corresponding handler
      push_ex_oop(ex_oop);      // Clear stack and push just the oop.
      merge_exception(handler_bci);
    }
  }

  // The first CatchProj is for the normal return.
  // (Note:  If this is a call to rethrow_Java, this node goes dead.)
  set_control(_gvn.transform( new (C) CatchProjNode(catch_, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci)));
}


//----------------------------catch_inline_exceptions--------------------------
// Handle all exceptions thrown by an inlined method or individual bytecode.
// Common case 1: we have no handler, so all exceptions merge right into
// the rethrow case.
// Case 2: we have some handlers, with loaded exception klasses that have
// no subklasses.  We do a Deutsch-Shiffman style type-check on the incoming
// exception oop and branch to the handler directly.
// Case 3: We have some handlers with subklasses, or whose klasses are not
// loaded at compile time.  We have to call the runtime to resolve the
// exception.  So we insert a RethrowCall and all the logic that goes with it.
void Parse::catch_inline_exceptions(SafePointNode* ex_map) {
  // Caller is responsible for saving away the map for normal control flow!
  assert(stopped(), "call set_map(NULL) first");
  assert(method()->has_exception_handlers(), "don't come here w/o work to do");

  Node* ex_node = saved_ex_oop(ex_map);
  if (ex_node == top()) {
    // No action needed.
    return;
  }
  const TypeInstPtr* ex_type = _gvn.type(ex_node)->isa_instptr();
  NOT_PRODUCT(if (ex_type==NULL) tty->print_cr("*** Exception not InstPtr"));
  if (ex_type == NULL)
    ex_type = TypeOopPtr::make_from_klass(env()->Throwable_klass())->is_instptr();

  // determine potential exception handlers
  ciExceptionHandlerStream handlers(method(), bci(),
                                    ex_type->klass()->as_instance_klass(),
                                    ex_type->klass_is_exact());

  // Start executing from the given throw state.  (Keep its stack, for now.)
  // Get the exception oop as known at compile time.
  ex_node = use_exception_state(ex_map);

  // Get the exception oop klass from its header
  Node* ex_klass_node = NULL;
  if (has_ex_handler() && !ex_type->klass_is_exact()) {
    Node* p = basic_plus_adr( ex_node, ex_node, oopDesc::klass_offset_in_bytes());
    ex_klass_node = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );

    // Compute the exception klass a little more cleverly.
    // The obvious solution is to simply do a LoadKlass from the 'ex_node'.
    // However, if the ex_node is a PhiNode, I'm going to do a LoadKlass for
    // each arm of the Phi.  If I know something clever about the exceptions
    // I'm loading the class from, I can replace the LoadKlass with the
    // klass constant for the exception oop.
    if( ex_node->is_Phi() ) {
      ex_klass_node = new (C) PhiNode( ex_node->in(0), TypeKlassPtr::OBJECT );
      for( uint i = 1; i < ex_node->req(); i++ ) {
        Node* p = basic_plus_adr( ex_node->in(i), ex_node->in(i), oopDesc::klass_offset_in_bytes() );
        Node* k = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );
        ex_klass_node->init_req( i, k );
      }
      _gvn.set_type(ex_klass_node, TypeKlassPtr::OBJECT);

    }
  }

  // Scan the exception table for applicable handlers.
  // If none, we can call rethrow() and be done!
  // If precise (loaded with no subklasses), insert a D.S. style
  // pointer compare to the correct handler and loop back.
  // If imprecise, switch to the Rethrow VM-call style handling.

  int remaining = handlers.count_remaining();

  // iterate through all entries sequentially
  for (; !handlers.is_done(); handlers.next()) {
    ciExceptionHandler* handler = handlers.handler();

    if (handler->is_rethrow()) {
      // If we fell off the end of the table without finding an imprecise
      // exception klass (and without finding a generic handler) then we
      // know this exception is not handled in this method.  We just rethrow
      // the exception into the caller.
      throw_to_exit(make_exception_state(ex_node));
      return;
    }

    // exception handler bci range covers throw_bci => investigate further
    int handler_bci = handler->handler_bci();

    if (remaining == 1) {
      push_ex_oop(ex_node);         // Push exception oop for handler
#ifndef PRODUCT
      if (PrintOpto && WizardMode) {
        tty->print_cr("  Catching every inline exception bci:%d -> handler_bci:%d", bci(), handler_bci);
      }
#endif
      merge_exception(handler_bci); // jump to handler
      return;                       // No more handling to be done here!
    }

    // Get the handler's klass
    ciInstanceKlass* klass = handler->catch_klass();

    if (!klass->is_loaded()) {      // klass is not loaded?
      // fall through into catch_call_exceptions which will emit a
      // handler with an uncommon trap.
      break;
    }

    if (klass->is_interface())      // should not happen, but...
      break;                        // bail out

    // Check the type of the exception against the catch type
    const TypeKlassPtr *tk = TypeKlassPtr::make(klass);
    Node* con = _gvn.makecon(tk);
    Node* not_subtype_ctrl = gen_subtype_check(ex_klass_node, con);
    if (!stopped()) {
      PreserveJVMState pjvms(this);
      const TypeInstPtr* tinst = TypeOopPtr::make_from_klass_unique(klass)->cast_to_ptr_type(TypePtr::NotNull)->is_instptr();
      assert(klass->has_subklass() || tinst->klass_is_exact(), "lost exactness");
      Node* ex_oop = _gvn.transform(new (C) CheckCastPPNode(control(), ex_node, tinst));
      push_ex_oop(ex_oop);      // Push exception oop for handler
#ifndef PRODUCT
      if (PrintOpto && WizardMode) {
        tty->print("  Catching inline exception bci:%d -> handler_bci:%d -- ", bci(), handler_bci);
        klass->print_name();
        tty->cr();
      }
#endif
      merge_exception(handler_bci);
    }
    set_control(not_subtype_ctrl);

    // Come here if exception does not match handler.
    // Carry on with more handler checks.
    --remaining;
  }

  assert(!stopped(), "you should return if you finish the chain");

  // Oops, need to call into the VM to resolve the klasses at runtime.
  // Note:  This call must not deoptimize, since it is not a real call at this bci!
  kill_dead_locals();

  make_runtime_call(RC_NO_LEAF | RC_MUST_THROW,
                    OptoRuntime::rethrow_Type(),
                    OptoRuntime::rethrow_stub(),
                    NULL, NULL,
                    ex_node);

  // Rethrow is a pure call, no side effects, only a result.
  // The result cannot be allocated, so we use I_O

  // Catch exceptions from the rethrow
  catch_call_exceptions(handlers);
}


// (Note:  Moved add_debug_info into GraphKit::add_safepoint_edges.)


#ifndef PRODUCT
void Parse::count_compiled_calls(bool at_method_entry, bool is_inline) {
  if( CountCompiledCalls ) {
    if( at_method_entry ) {
      // bump invocation counter if top method (for statistics)
      if (CountCompiledCalls && depth() == 1) {
        const TypePtr* addr_type = TypeMetadataPtr::make(method());
        Node* adr1 = makecon(addr_type);
        Node* adr2 = basic_plus_adr(adr1, adr1, in_bytes(Method::compiled_invocation_counter_offset()));
        increment_counter(adr2);
      }
    } else if (is_inline) {
      switch (bc()) {
      case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_inlined_calls_addr()); break;
      case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_inlined_interface_calls_addr()); break;
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokedynamic:
      case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_inlined_static_calls_addr()); break;
      default: fatal("unexpected call bytecode");
      }
    } else {
      switch (bc()) {
      case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_normal_calls_addr()); break;
      case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_interface_calls_addr()); break;
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokedynamic:
      case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_static_calls_addr()); break;
      default: fatal("unexpected call bytecode");
      }
    }
  }
}
#endif //PRODUCT


ciMethod* Compile::optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKlass* klass,
                                         ciMethod* callee, const TypeOopPtr* receiver_type,
                                         bool is_virtual,
                                         bool& call_does_dispatch, int& vtable_index) {
  // Set default values for out-parameters.
  call_does_dispatch = true;
  vtable_index       = Method::invalid_vtable_index;

  // Choose call strategy.
  ciMethod* optimized_virtual_method = optimize_inlining(caller, bci, klass, callee, receiver_type);

  // Has the call been sufficiently improved such that it is no longer virtual?
  if (optimized_virtual_method != NULL) {
    callee             = optimized_virtual_method;
    call_does_dispatch = false;
  } else if (!UseInlineCaches && is_virtual && callee->is_loaded()) {
    // We can make a vtable call at this site
    vtable_index = callee->resolve_vtable_index(caller->holder(), klass);
  }
  return callee;
}

// Identify possible target method and inlining style
ciMethod* Compile::optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
                                     ciMethod* callee, const TypeOopPtr* receiver_type) {
  // only use for virtual or interface calls

  // If it is obviously final, do not bother to call find_monomorphic_target,
  // because the class hierarchy checks are not needed, and may fail due to
  // incompletely loaded classes.  Since we do our own class loading checks
  // in this module, we may confidently bind to any method.
  if (callee->can_be_statically_bound()) {
    return callee;
  }

  // Attempt to improve the receiver
  bool actual_receiver_is_exact = false;
  ciInstanceKlass* actual_receiver = klass;
  if (receiver_type != NULL) {
    // Array methods are all inherited from Object, and are monomorphic.
    if (receiver_type->isa_aryptr() &&
        callee->holder() == env()->Object_klass()) {
      return callee;
    }

    // All other interesting cases are instance klasses.
    if (!receiver_type->isa_instptr()) {
      return NULL;
    }

    ciInstanceKlass *ikl = receiver_type->klass()->as_instance_klass();
    if (ikl->is_loaded() && ikl->is_initialized() && !ikl->is_interface() &&
        (ikl == actual_receiver || ikl->is_subtype_of(actual_receiver))) {
      // ikl is the same or a better type than the original actual_receiver,
      // e.g. static receiver from bytecodes.
      actual_receiver = ikl;
      // Is the actual_receiver exact?
      actual_receiver_is_exact = receiver_type->klass_is_exact();
    }
  }

  ciInstanceKlass*   calling_klass = caller->holder();
  ciMethod* cha_monomorphic_target = callee->find_monomorphic_target(calling_klass, klass, actual_receiver);
  if (cha_monomorphic_target != NULL) {
    assert(!cha_monomorphic_target->is_abstract(), "");
    // Look at the method-receiver type.  Does it add "too much information"?
    ciKlass*    mr_klass = cha_monomorphic_target->holder();
    const Type* mr_type  = TypeInstPtr::make(TypePtr::BotPTR, mr_klass);
    if (receiver_type == NULL || !receiver_type->higher_equal(mr_type)) {
      // Calling this method would include an implicit cast to its holder.
      // %%% Not yet implemented.  Would throw minor asserts at present.
      // %%% The most common wins are already gained by +UseUniqueSubclasses.
      // To fix, put the higher_equal check at the call of this routine,
      // and add a CheckCastPP to the receiver.
      if (TraceDependencies) {
        tty->print_cr("found unique CHA method, but could not cast up");
        tty->print("  method  = ");
        cha_monomorphic_target->print();
        tty->cr();
      }
      if (log() != NULL) {
        log()->elem("missed_CHA_opportunity klass='%d' method='%d'",
                    log()->identify(klass),
                    log()->identify(cha_monomorphic_target));
      }
      cha_monomorphic_target = NULL;
    }
  }
  if (cha_monomorphic_target != NULL) {
    // Hardwiring a virtual.
    // If we inlined because CHA revealed only a single target method,
    // then we are dependent on that target method not getting overridden
    // by dynamic class loading.  Be sure to test the "static" receiver
    // dest_method here, as opposed to the actual receiver, which may
    // falsely lead us to believe that the receiver is final or private.
    dependencies()->assert_unique_concrete_method(actual_receiver, cha_monomorphic_target);
    return cha_monomorphic_target;
  }

  // If the type is exact, we can still bind the method w/o a vcall.
  // (This case comes after CHA so we can see how much extra work it does.)
  if (actual_receiver_is_exact) {
    // In case of evolution, there is a dependence on every inlined method, since each
    // such method can be changed when its class is redefined.
    ciMethod* exact_method = callee->resolve_invoke(calling_klass, actual_receiver);
    if (exact_method != NULL) {
#ifndef PRODUCT
      if (PrintOpto) {
        tty->print("  Calling method via exact type @%d --- ", bci);
        exact_method->print_name();
        tty->cr();
      }
#endif
      return exact_method;
    }
  }

  return NULL;
}