/*
 * Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_doCall.cpp.incl"

#ifndef PRODUCT
void trace_type_profile(ciMethod *method, int depth, int bci, ciMethod *prof_method, ciKlass *prof_klass, int site_count, int receiver_count) {
  if (TraceTypeProfile || PrintInlining || PrintOptoInlining) {
    tty->print(" ");
    for( int i = 0; i < depth; i++ ) tty->print(" ");
    if (!PrintOpto) {
      method->print_short_name();
      tty->print(" ->");
    }
    tty->print(" @ %d ", bci);
    prof_method->print_short_name();
    tty->print(" >>TypeProfile (%d/%d counts) = ", receiver_count, site_count);
    prof_klass->name()->print_symbol();
    tty->print_cr(" (%d bytes)", prof_method->code_size());
  }
}
#endif

CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index, bool call_is_virtual,
                                       JVMState* jvms, bool allow_inline,
                                       float prof_factor) {
  CallGenerator* cg;

  // Dtrace currently doesn't work unless all calls are vanilla
  if (env()->dtrace_method_probes()) {
    allow_inline = false;
  }

  // Note: When we get profiling during stage-1 compiles, we want to pull
  // from more specific profile data which pertains to this inlining.
  // Right now, ignore the information in jvms->caller(), and do method[bci].
  ciCallProfile profile = jvms->method()->call_profile_at_bci(jvms->bci());

  // See how many times this site has been invoked.
  int site_count = profile.count();
  int receiver_count = -1;
  if (call_is_virtual && UseTypeProfile && profile.has_receiver(0)) {
    // Receivers in the profile structure are ordered by call counts
    // so that the most called (major) receiver is profile.receiver(0).
    receiver_count = profile.receiver_count(0);
  }
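
  // Illustrative shape of the profile consumed here (a sketch, not the
  // MDO layout): for a virtual site `a.m()` observed N times,
  //   site_count          == N
  //   profile.receiver(0) == most frequent receiver klass seen
  //   profile.morphism()  == 1 (monomorphic) or 2 (bimorphic)
  // Receiver counts are sorted, so receiver(0) dominates receiver(1).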
  CompileLog* log = this->log();
  if (log != NULL) {
    int rid  = (receiver_count >= 0)? log->identify(profile.receiver(0)): -1;
    int r2id = (profile.morphism() == 2)? log->identify(profile.receiver(1)): -1;
    log->begin_elem("call method='%d' count='%d' prof_factor='%g'",
                    log->identify(call_method), site_count, prof_factor);
    if (call_is_virtual)  log->print(" virtual='1'");
    if (allow_inline)     log->print(" inline='1'");
    if (receiver_count >= 0) {
      log->print(" receiver='%d' receiver_count='%d'", rid, receiver_count);
      if (profile.has_receiver(1)) {
        log->print(" receiver2='%d' receiver2_count='%d'", r2id, profile.receiver_count(1));
      }
    }
    log->end_elem();
  }

  // Special case the handling of certain common, profitable library
  // methods.  If these methods are replaced with specialized code,
  // then we return it as the inlined version of the call.
  // We do this before the strict f.p. check below because the
  // intrinsics handle strict f.p. correctly.
  if (allow_inline) {
    cg = find_intrinsic(call_method, call_is_virtual);
    if (cg != NULL)  return cg;
  }

  // Do not inline strict fp into non-strict code, or the reverse
  bool caller_method_is_strict = jvms->method()->is_strict();
  if( caller_method_is_strict ^ call_method->is_strict() ) {
    allow_inline = false;
  }

  // Attempt to inline...
  if (allow_inline) {
    // The profile data is only partly attributable to this caller,
    // scale back the call site information.
    float past_uses = jvms->method()->scale_count(site_count, prof_factor);
    // This is the number of times we expect the call code to be used.
    float expected_uses = past_uses;

    // Try inlining a bytecoded method:
    if (!call_is_virtual) {
      InlineTree* ilt;
      if (UseOldInlining) {
        ilt = InlineTree::find_subtree_from_root(this->ilt(), jvms->caller(), jvms->method());
      } else {
        // Make a disembodied, stateless ILT.
        // TO DO:  When UseOldInlining is removed, copy the ILT code elsewhere.
        float site_invoke_ratio = prof_factor;
        // Note:  ilt is for the root of this parse, not the present call site.
        ilt = new InlineTree(this, jvms->method(), jvms->caller(), site_invoke_ratio, 0);
      }
      WarmCallInfo scratch_ci;
      if (!UseOldInlining)
        scratch_ci.init(jvms, call_method, profile, prof_factor);
      WarmCallInfo* ci = ilt->ok_to_inline(call_method, jvms, profile, &scratch_ci);
      assert(ci != &scratch_ci, "do not let this pointer escape");
      bool allow_inline   = (ci != NULL && !ci->is_cold());
      bool require_inline = (allow_inline && ci->is_hot());

      if (allow_inline) {
        CallGenerator* cg = CallGenerator::for_inline(call_method, expected_uses);
        if (require_inline && cg != NULL && should_delay_inlining(call_method, jvms)) {
          // Delay the inlining of this method to give us the
          // opportunity to perform some high level optimizations
          // first.
          return CallGenerator::for_late_inline(call_method, cg);
        }
        if (cg == NULL) {
          // Fall through.
        } else if (require_inline || !InlineWarmCalls) {
          return cg;
        } else {
          CallGenerator* cold_cg = call_generator(call_method, vtable_index, call_is_virtual, jvms, false, prof_factor);
          return CallGenerator::for_warm_call(ci, cold_cg, cg);
        }
      }
    }
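
    // Summary of the decision just made (a sketch; the hot/cold thresholds
    // live in WarmCallInfo): hot sites are inlined outright, possibly late
    // after high-level optimizations; warm sites keep an inline body plus a
    // cold out-of-line fallback via for_warm_call(); cold sites fall
    // through to the non-inline tactics below.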
    // Try using the type profile.
    if (call_is_virtual && site_count > 0 && receiver_count > 0) {
      // The major receiver's count >= TypeProfileMajorReceiverPercent of site_count.
      bool have_major_receiver = (100.*profile.receiver_prob(0) >= (float)TypeProfileMajorReceiverPercent);
      ciMethod* receiver_method = NULL;
      if (have_major_receiver || profile.morphism() == 1 ||
          (profile.morphism() == 2 && UseBimorphicInlining)) {
        // receiver_method = profile.method();
        // Profiles do not suggest methods now.  Look it up in the major receiver.
        receiver_method = call_method->resolve_invoke(jvms->method()->holder(),
                                                      profile.receiver(0));
      }
      if (receiver_method != NULL) {
        // The single majority receiver sufficiently outweighs the minority.
        CallGenerator* hit_cg = this->call_generator(receiver_method,
              vtable_index, !call_is_virtual, jvms, allow_inline, prof_factor);
        if (hit_cg != NULL) {
          // Look up second receiver.
          CallGenerator* next_hit_cg = NULL;
          ciMethod* next_receiver_method = NULL;
          if (profile.morphism() == 2 && UseBimorphicInlining) {
            next_receiver_method = call_method->resolve_invoke(jvms->method()->holder(),
                                                               profile.receiver(1));
            if (next_receiver_method != NULL) {
              next_hit_cg = this->call_generator(next_receiver_method,
                                  vtable_index, !call_is_virtual, jvms,
                                  allow_inline, prof_factor);
              if (next_hit_cg != NULL && !next_hit_cg->is_inline() &&
                  have_major_receiver && UseOnlyInlinedBimorphic) {
                // Skip if we can't inline the second receiver's method.
                next_hit_cg = NULL;
              }
            }
          }
          CallGenerator* miss_cg;
          if (( profile.morphism() == 1 ||
               (profile.morphism() == 2 && next_hit_cg != NULL) ) &&

              !too_many_traps(Deoptimization::Reason_class_check)

              // Check only the total number of traps per method, to allow
              // the transition from the monomorphic to the bimorphic case
              // between compilations without falling back to a virtual call.
              // A monomorphic case may have the class_check trap flag set
              // due to the time gap between the uncommon trap processing
              // (when flags are set in the MDO) and the call site bytecode
              // execution in the interpreter (when MDO counters are updated).
              // There was also a class_check trap in the monomorphic case
              // due to bug 6225440.

             ) {
            // Generate an uncommon trap for the class check failure path
            // in case of a monomorphic or bimorphic virtual call site.
            miss_cg = CallGenerator::for_uncommon_trap(call_method,
                        Deoptimization::Reason_class_check,
                        Deoptimization::Action_maybe_recompile);
          } else {
            // Generate a virtual call for the class check failure path
            // in case of a polymorphic virtual call site.
            miss_cg = CallGenerator::for_virtual_call(call_method, vtable_index);
          }
          if (miss_cg != NULL) {
            if (next_hit_cg != NULL) {
              NOT_PRODUCT(trace_type_profile(jvms->method(), jvms->depth(), jvms->bci(), next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1)));
              // We don't need to record dependency on a receiver here and below.
              // Whenever we inline, the dependency is added by Parse::Parse().
              miss_cg = CallGenerator::for_predicted_call(profile.receiver(1), miss_cg, next_hit_cg, PROB_MAX);
            }
            if (miss_cg != NULL) {
              NOT_PRODUCT(trace_type_profile(jvms->method(), jvms->depth(), jvms->bci(), receiver_method, profile.receiver(0), site_count, receiver_count));
              cg = CallGenerator::for_predicted_call(profile.receiver(0), miss_cg, hit_cg, profile.receiver_prob(0));
              if (cg != NULL)  return cg;
            }
          }
        }
      }
    }
  }
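
  // The predicted call built above expands, roughly, to this guard shape
  // (illustrative pseudocode, not the actual node graph):
  //
  //   if (receiver->klass() == receiver(0)) { hit_cg }           // prob = receiver_prob(0)
  //   else if (receiver->klass() == receiver(1)) { next_hit_cg } // bimorphic case only
  //   else { miss_cg }   // uncommon trap, or a true virtual call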
  // Do MethodHandle calls.
  if (call_method->is_method_handle_invoke()) {
    if (jvms->method()->java_code_at_bci(jvms->bci()) != Bytecodes::_invokedynamic) {
      GraphKit kit(jvms);
      Node* n = kit.argument(0);

      if (n->Opcode() == Op_ConP) {
        const TypeOopPtr* oop_ptr = n->bottom_type()->is_oopptr();
        ciObject* const_oop = oop_ptr->const_oop();
        ciMethodHandle* method_handle = const_oop->as_method_handle();

        // Set the actually called method to have access to the class
        // and signature in the MethodHandleCompiler.
        method_handle->set_callee(call_method);

        // Get an adapter for the MethodHandle.
        ciMethod* target_method = method_handle->get_method_handle_adapter();

        CallGenerator* hit_cg = this->call_generator(target_method, vtable_index, false, jvms, true, prof_factor);
        if (hit_cg != NULL && hit_cg->is_inline())
          return hit_cg;
      }

      return CallGenerator::for_direct_call(call_method);
    }
    else {
      // Get the MethodHandle from the CallSite.
      ciMethod* caller_method = jvms->method();
      ciBytecodeStream str(caller_method);
      str.force_bci(jvms->bci());  // Set the stream to the invokedynamic bci.
      ciCallSite*     call_site     = str.get_call_site();
      ciMethodHandle* method_handle = call_site->get_target();

      // Set the actually called method to have access to the class
      // and signature in the MethodHandleCompiler.
      method_handle->set_callee(call_method);

      // Get an adapter for the MethodHandle.
      ciMethod* target_method = method_handle->get_invokedynamic_adapter();

      CallGenerator* hit_cg = this->call_generator(target_method, vtable_index, false, jvms, true, prof_factor);
      if (hit_cg != NULL && hit_cg->is_inline()) {
        CallGenerator* miss_cg = CallGenerator::for_dynamic_call(call_method);
        return CallGenerator::for_predicted_dynamic_call(method_handle, miss_cg, hit_cg, prof_factor);
      }

      // If something failed, generate a normal dynamic call.
      return CallGenerator::for_dynamic_call(call_method);
    }
  }

  // There was no special inlining tactic, or it bailed out.
  // Use a more generic tactic, like a simple call.
  if (call_is_virtual) {
    return CallGenerator::for_virtual_call(call_method, vtable_index);
  } else {
    // Class Hierarchy Analysis or Type Profile reveals a unique target,
    // or it is a static or special call.
    return CallGenerator::for_direct_call(call_method, should_delay_inlining(call_method, jvms));
  }
}
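
// A hedged sketch of the invokedynamic tactic above: the CallSite target
// observed at compile time is speculated on, guarding the inlined adapter
// with a check against the site's current target (pseudocode, names
// illustrative):
//
//   if (callsite.target == speculated_target) { inlined adapter body }
//   else                                      { out-of-line dynamic call }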

// Return true for methods that shouldn't be inlined early so that
// they are easier to analyze and optimize as intrinsics.
bool Compile::should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
  if (has_stringbuilder()) {

    if ((call_method->holder() == C->env()->StringBuilder_klass() ||
         call_method->holder() == C->env()->StringBuffer_klass()) &&
        (jvms->method()->holder() == C->env()->StringBuilder_klass() ||
         jvms->method()->holder() == C->env()->StringBuffer_klass())) {
      // Delay SB calls only when called from non-SB code
      return false;
    }

    switch (call_method->intrinsic_id()) {
      case vmIntrinsics::_StringBuilder_void:
      case vmIntrinsics::_StringBuilder_int:
      case vmIntrinsics::_StringBuilder_String:
      case vmIntrinsics::_StringBuilder_append_char:
      case vmIntrinsics::_StringBuilder_append_int:
      case vmIntrinsics::_StringBuilder_append_String:
      case vmIntrinsics::_StringBuilder_toString:
      case vmIntrinsics::_StringBuffer_void:
      case vmIntrinsics::_StringBuffer_int:
      case vmIntrinsics::_StringBuffer_String:
      case vmIntrinsics::_StringBuffer_append_char:
      case vmIntrinsics::_StringBuffer_append_int:
      case vmIntrinsics::_StringBuffer_append_String:
      case vmIntrinsics::_StringBuffer_toString:
      case vmIntrinsics::_Integer_toString:
        return true;

      case vmIntrinsics::_String_String:
        {
          Node* receiver = jvms->map()->in(jvms->argoff() + 1);
          if (receiver->is_Proj() && receiver->in(0)->is_CallStaticJava()) {
            CallStaticJavaNode* csj = receiver->in(0)->as_CallStaticJava();
            ciMethod* m = csj->method();
            if (m != NULL &&
                (m->intrinsic_id() == vmIntrinsics::_StringBuffer_toString ||
                 m->intrinsic_id() == vmIntrinsics::_StringBuilder_toString))
              // Delay String.<init>(new SB())
              return true;
          }
          return false;
        }

      default:
        return false;
    }
  }
  return false;
}
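
// (Delaying these inlines keeps a StringBuilder/StringBuffer chain intact so
// the string-concatenation optimizer can later match the whole pattern at
// once, e.g. new StringBuilder().append(a).append(b).toString(); inlining
// the pieces early would obscure that pattern.)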

// uncommon-trap call-sites where callee is unloaded, uninitialized, or will not link
bool Parse::can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass* klass) {
  // Additional inputs to consider...
  // bc      = bc()
  // caller  = method()
  // iter().get_method_holder_index()
  assert( dest_method->is_loaded(), "ciTypeFlow should not let us get here" );
  // Interface classes can be loaded & linked and never get around to
  // being initialized.  Uncommon-trap for not-initialized static or
  // v-calls.  Let interface calls happen.
  ciInstanceKlass* holder_klass = dest_method->holder();
  if (!holder_klass->is_initialized() &&
      !holder_klass->is_interface()) {
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  holder_klass);
    return true;
  }

  assert(dest_method->will_link(method()->holder(), klass, bc()), "dest_method: typeflow responsibility");
  return false;
}


//------------------------------do_call----------------------------------------
// Handle your basic call.  Inline if we can & want to, else just setup call.
void Parse::do_call() {
  // It's likely we are going to add debug info soon.
  // Also, if we inline a guy who eventually needs debug info for this JVMS,
  // our contribution to it is cleaned up right here.
  kill_dead_locals();

  // Set frequently used booleans
  bool is_virtual = bc() == Bytecodes::_invokevirtual;
  bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface;
  bool has_receiver = is_virtual_or_interface || bc() == Bytecodes::_invokespecial;
  bool is_invokedynamic = bc() == Bytecodes::_invokedynamic;

  // Find target being called
  bool             will_link;
  ciMethod*        dest_method  = iter().get_method(will_link);
  ciInstanceKlass* holder_klass = dest_method->holder();
  ciKlass* holder = iter().get_declared_method_holder();
  ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);

  int nargs = dest_method->arg_size();
  if (is_invokedynamic)  nargs -= 1;

  // uncommon-trap when callee is unloaded, uninitialized or will not link
  // bailout when too many arguments for register representation
  if (!will_link || can_not_compile_call_site(dest_method, klass)) {
#ifndef PRODUCT
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" can not compile call at bci %d to:", bci());
      dest_method->print_name(); tty->cr();
    }
#endif
    return;
  }
  assert(holder_klass->is_loaded(), "");
  assert((dest_method->is_static() || is_invokedynamic) == !has_receiver, "must match bc");
  // Note: this takes into account invokeinterface of methods declared in java/lang/Object,
  // which should be invokevirtuals but according to the VM spec may be invokeinterfaces
  assert(holder_klass->is_interface() || holder_klass->super() == NULL || (bc() != Bytecodes::_invokeinterface), "must match bc");
  // Note:  In the absence of miranda methods, an abstract class K can perform
  // an invokevirtual directly on an interface method I.m if K implements I.

  // ---------------------
  // Does Class Hierarchy Analysis reveal only a single target of a v-call?
  // Then we may inline or make a static call, but become dependent on there being only 1 target.
  // Does the call-site type profile reveal only one receiver?
  // Then we may introduce a run-time check and inline on the path where it succeeds.
  // The other path may uncommon_trap, check for another receiver, or do a v-call.

  // Choose call strategy.
  bool call_is_virtual = is_virtual_or_interface;
  int vtable_index = methodOopDesc::invalid_vtable_index;
  ciMethod* call_method = dest_method;

  // Try to get the most accurate receiver type
  if (is_virtual_or_interface) {
    Node*             receiver_node = stack(sp() - nargs);
    const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr();
    ciMethod* optimized_virtual_method = optimize_inlining(method(), bci(), klass, dest_method, receiver_type);

    // Has the call been sufficiently improved such that it is no longer virtual?
    if (optimized_virtual_method != NULL) {
      call_method     = optimized_virtual_method;
      call_is_virtual = false;
    } else if (!UseInlineCaches && is_virtual && call_method->is_loaded()) {
      // We can make a vtable call at this site
      vtable_index = call_method->resolve_vtable_index(method()->holder(), klass);
    }
  }
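
  // Example: if the receiver type known to the parser is exact (say the
  // bytecode just allocated the object), optimize_inlining can resolve the
  // target statically, and this virtual call site is then compiled as a
  // direct, possibly inlined, call.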

  // Note:  It's OK to try to inline a virtual call.
  // The call generator will not attempt to inline a polymorphic call
  // unless it knows how to optimize the receiver dispatch.
  bool try_inline = (C->do_inlining() || InlineAccessors);

  // ---------------------
  inc_sp(- nargs);              // Temporarily pop args for JVM state of call
  JVMState* jvms = sync_jvms();

  // ---------------------
  // Decide call tactic.
  // This call checks with CHA, the interpreter profile, intrinsics table, etc.
  // It decides whether inlining is desirable or not.
  CallGenerator* cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms, try_inline, prof_factor());

  // ---------------------
  // Round double arguments before call
  round_double_arguments(dest_method);

#ifndef PRODUCT
  // bump global counters for calls
  count_compiled_calls(false/*at_method_entry*/, cg->is_inline());

  // Record first part of parsing work for this call
  parse_histogram()->record_change();
#endif // not PRODUCT

  assert(jvms == this->jvms(), "still operating on the right JVMS");
  assert(jvms_in_sync(),       "jvms must carry full info into CG");

  // save across call, for a subsequent cast_not_null.
  Node* receiver = has_receiver ? argument(0) : NULL;

  // Bump method data counters (We profile *before* the call is made
  // because exceptions don't return to the call site.)
  profile_call(receiver);

  JVMState* new_jvms;
  if ((new_jvms = cg->generate(jvms)) == NULL) {
    // When inlining attempt fails (e.g., too many arguments),
    // it may contaminate the current compile state, making it
    // impossible to pull back and try again.  Once we call
    // cg->generate(), we are committed.  If it fails, the whole
    // compilation task is compromised.
    if (failing())  return;
#ifndef PRODUCT
    if (PrintOpto || PrintOptoInlining || PrintInlining) {
      // Only one fall-back, so if an intrinsic fails, ignore any bytecodes.
      if (cg->is_intrinsic() && call_method->code_size() > 0) {
        tty->print("Bailed out of intrinsic, will not inline: ");
        call_method->print_name(); tty->cr();
      }
    }
#endif
    // This can happen if a library intrinsic is available, but refuses
    // the call site, perhaps because it did not match a pattern the
    // intrinsic was expecting to optimize.  The fallback position is
    // to call out-of-line.
    try_inline = false;  // Inline tactic bailed out.
    cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms, try_inline, prof_factor());
    if ((new_jvms = cg->generate(jvms)) == NULL) {
      guarantee(failing(), "call failed to generate: calls should work");
      return;
    }
  }

  if (cg->is_inline()) {
    // Accumulate has_loops estimate
    C->set_has_loops(C->has_loops() || call_method->has_loops());
    C->env()->notice_inlined_method(call_method);
  }

  // Reset parser state from [new_]jvms, which now carries results of the call.
  // Return value (if any) is already pushed on the stack by the cg.
  add_exception_states_from(new_jvms);
  if (new_jvms->map()->control() == top()) {
    stop_and_kill_map();
  } else {
    assert(new_jvms->same_calls_as(jvms), "method/bci left unchanged");
    set_jvms(new_jvms);
  }
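
  // In sketch form, the code above is a two-attempt protocol:
  //   new_jvms = cg->generate(jvms);     // chosen tactic; may refuse the site
  //   if (new_jvms == NULL)              // e.g. an intrinsic bailed out
  //     retry with the plain out-of-line generator, which must succeed.
  // A non-NULL new_jvms carries the post-call state, with any return value
  // already pushed.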

  if (!stopped()) {
    // This was some sort of virtual call, which did a null check for us.
    // Now we can assert receiver-not-null, on the normal return path.
    if (receiver != NULL && cg->is_virtual()) {
      Node* cast = cast_not_null(receiver);
      // %%% assert(receiver == cast, "should already have cast the receiver");
    }

    // Round double result after a call from strict to non-strict code
    round_double_result(dest_method);

    // If the return type of the method is not loaded, assert that the
    // value we got is a null.  Otherwise, we need to recompile.
    if (!dest_method->return_type()->is_loaded()) {
#ifndef PRODUCT
      if (PrintOpto && (Verbose || WizardMode)) {
        method()->print_name(); tty->print_cr(" asserting nullness of result at bci: %d", bci());
        dest_method->print_name(); tty->cr();
      }
#endif
      if (C->log() != NULL) {
        C->log()->elem("assert_null reason='return' klass='%d'",
                       C->log()->identify(dest_method->return_type()));
      }
      // If there is going to be a trap, put it at the next bytecode:
      set_bci(iter().next_bci());
      do_null_assert(peek(), T_OBJECT);
      set_bci(iter().cur_bci()); // put it back
    }
  }

  // Restart record of parsing work after possible inlining of call
#ifndef PRODUCT
  parse_histogram()->set_initial_state(bc());
#endif
}

//---------------------------catch_call_exceptions-----------------------------
// Put Catch and CatchProj nodes behind a just-created call.
// Send their caught exceptions to the proper handler.
// This may be used after a call to the rethrow VM stub,
// when it is needed to process unloaded exception classes.
void Parse::catch_call_exceptions(ciExceptionHandlerStream& handlers) {
  // Exceptions are delivered through this channel:
  Node* i_o = this->i_o();

  // Add a CatchNode.
  GrowableArray<int>* bcis = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, -1);
  GrowableArray<const Type*>* extypes = new (C->node_arena()) GrowableArray<const Type*>(C->node_arena(), 8, 0, NULL);
  GrowableArray<int>* saw_unloaded = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, 0);

  for (; !handlers.is_done(); handlers.next()) {
    ciExceptionHandler* h       = handlers.handler();
    int                 h_bci   = h->handler_bci();
    ciInstanceKlass*    h_klass = h->is_catch_all() ? env()->Throwable_klass() : h->catch_klass();
    // Do not introduce unloaded exception types into the graph:
    if (!h_klass->is_loaded()) {
      if (saw_unloaded->contains(h_bci)) {
        /* We've already seen an unloaded exception with h_bci,
           so don't duplicate.  Duplication will cause the CatchNode to be
           unnecessarily large.  See 4713716. */
        continue;
      } else {
        saw_unloaded->append(h_bci);
      }
    }
    const Type* h_extype = TypeOopPtr::make_from_klass(h_klass);
    // (We use make_from_klass because it respects UseUniqueSubclasses.)
    h_extype = h_extype->join(TypeInstPtr::NOTNULL);
    assert(!h_extype->empty(), "sanity");
    // Note:  It's OK if the BCIs repeat themselves.
    bcis->append(h_bci);
    extypes->append(h_extype);
  }

  int len = bcis->length();
  CatchNode *cn = new (C, 2) CatchNode(control(), i_o, len+1);
  Node *catch_ = _gvn.transform(cn);
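
  // Resulting control shape, roughly:
  //
  //                  call
  //                    |
  //                CatchNode
  //           /        |         \
  //    fall-through  CatchProj   CatchProj ...   (one per distinct handler,
  //    (normal path) @handler1   @handler2        plus the normal return)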

  // now branch with the exception state to each of the (potential)
  // handlers
  for (int i = 0; i < len; i++) {
    // Setup JVM state to enter the handler.
    PreserveJVMState pjvms(this);
    // Locals are just copied from before the call.
    // Get control from the CatchNode.
    int handler_bci = bcis->at(i);
    Node* ctrl = _gvn.transform( new (C, 1) CatchProjNode(catch_, i+1, handler_bci));
    // This handler cannot happen?
    if (ctrl == top())  continue;
    set_control(ctrl);

    // Create exception oop
    const TypeInstPtr* extype = extypes->at(i)->is_instptr();
    Node *ex_oop = _gvn.transform(new (C, 2) CreateExNode(extypes->at(i), ctrl, i_o));

    // Handle unloaded exception classes.
    if (saw_unloaded->contains(handler_bci)) {
      // An unloaded exception type is coming here.  Do an uncommon trap.
#ifndef PRODUCT
      // We do not expect the same handler bci to take both cold unloaded
      // and hot loaded exceptions.  But, watch for it.
      if (extype->is_loaded()) {
        tty->print("Warning: Handler @%d takes mixed loaded/unloaded exceptions in ", handler_bci);
        method()->print_name(); tty->cr();
      } else if (PrintOpto && (Verbose || WizardMode)) {
        tty->print("Bailing out on unloaded exception type ");
        extype->klass()->print_name();
        tty->print(" at bci:%d in ", bci());
        method()->print_name(); tty->cr();
      }
#endif
      // Emit an uncommon trap instead of processing the block.
      set_bci(handler_bci);
      push_ex_oop(ex_oop);
      uncommon_trap(Deoptimization::Reason_unloaded,
                    Deoptimization::Action_reinterpret,
                    extype->klass(), "!loaded exception");
      set_bci(iter().cur_bci()); // put it back
      continue;
    }

    // go to the exception handler
    if (handler_bci < 0) {      // merge with corresponding rethrow node
      throw_to_exit(make_exception_state(ex_oop));
    } else {                    // Else jump to corresponding handler
      push_ex_oop(ex_oop);      // Clear stack and push just the oop.
      merge_exception(handler_bci);
    }
  }

  // The first CatchProj is for the normal return.
  // (Note:  If this is a call to rethrow_Java, this node goes dead.)
  set_control(_gvn.transform( new (C, 1) CatchProjNode(catch_, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci)));
}
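
// (When a handler's exception class is unloaded, the loop above cannot type
// the exception edge, so the handler block is replaced by an uncommon trap
// with Reason_unloaded: the method deoptimizes and re-runs that bci in the
// interpreter, which loads the class on first use.)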


//----------------------------catch_inline_exceptions--------------------------
// Handle all exceptions thrown by an inlined method or individual bytecode.
// Common case 1: we have no handler, so all exceptions merge right into
// the rethrow case.
// Case 2: we have some handlers, with loaded exception klasses that have
// no subklasses.  We do a Deutsch-Shiffman style type-check on the incoming
// exception oop and branch to the handler directly.
// Case 3: We have some handlers whose klasses have subklasses or are not
// loaded at compile-time.  We have to call the runtime to resolve the
// exception.  So we insert a RethrowCall and all the logic that goes with it.
void Parse::catch_inline_exceptions(SafePointNode* ex_map) {
  // Caller is responsible for saving away the map for normal control flow!
  assert(stopped(), "call set_map(NULL) first");
  assert(method()->has_exception_handlers(), "don't come here w/o work to do");

  Node* ex_node = saved_ex_oop(ex_map);
  if (ex_node == top()) {
    // No action needed.
    return;
  }
  const TypeInstPtr* ex_type = _gvn.type(ex_node)->isa_instptr();
  NOT_PRODUCT(if (ex_type==NULL) tty->print_cr("*** Exception not InstPtr"));
  if (ex_type == NULL)
    ex_type = TypeOopPtr::make_from_klass(env()->Throwable_klass())->is_instptr();

  // determine potential exception handlers
  ciExceptionHandlerStream handlers(method(), bci(),
                                    ex_type->klass()->as_instance_klass(),
                                    ex_type->klass_is_exact());

  // Start executing from the given throw state.  (Keep its stack, for now.)
  // Get the exception oop as known at compile time.
  ex_node = use_exception_state(ex_map);

  // Get the exception oop klass from its header
  Node* ex_klass_node = NULL;
  if (has_ex_handler() && !ex_type->klass_is_exact()) {
    Node* p = basic_plus_adr( ex_node, ex_node, oopDesc::klass_offset_in_bytes());
    ex_klass_node = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );

    // Compute the exception klass a little more cleverly.
    // The obvious solution is to simply do a LoadKlass from the 'ex_node'.
    // However, if the ex_node is a PhiNode, I'm going to do a LoadKlass for
    // each arm of the Phi.  If I know something clever about the exceptions
    // I'm loading the class from, I can replace the LoadKlass with the
    // klass constant for the exception oop.
    if( ex_node->is_Phi() ) {
      ex_klass_node = new (C, ex_node->req()) PhiNode( ex_node->in(0), TypeKlassPtr::OBJECT );
      for( uint i = 1; i < ex_node->req(); i++ ) {
        Node* p = basic_plus_adr( ex_node->in(i), ex_node->in(i), oopDesc::klass_offset_in_bytes() );
        Node* k = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );
        ex_klass_node->init_req( i, k );
      }
      _gvn.set_type(ex_klass_node, TypeKlassPtr::OBJECT);

    }
  }

  // Scan the exception table for applicable handlers.
  // If none, we can call rethrow() and be done!
  // If precise (loaded with no subklasses), insert a D.S. style
  // pointer compare to the correct handler and loop back.
  // If imprecise, switch to the Rethrow VM-call style handling.

  int remaining = handlers.count_remaining();

  // iterate through all entries sequentially
  for (;!handlers.is_done(); handlers.next()) {
    // Do nothing if turned off
    if( !DeutschShiffmanExceptions ) break;
    ciExceptionHandler* handler = handlers.handler();

    if (handler->is_rethrow()) {
      // If we fell off the end of the table without finding an imprecise
      // exception klass (and without finding a generic handler) then we
      // know this exception is not handled in this method.  We just rethrow
      // the exception into the caller.
      throw_to_exit(make_exception_state(ex_node));
      return;
    }

    // exception handler bci range covers throw_bci => investigate further
    int handler_bci = handler->handler_bci();

    if (remaining == 1) {
      push_ex_oop(ex_node);        // Push exception oop for handler
#ifndef PRODUCT
      if (PrintOpto && WizardMode) {
        tty->print_cr("  Catching every inline exception bci:%d -> handler_bci:%d", bci(), handler_bci);
      }
#endif
      merge_exception(handler_bci); // jump to handler
      return;                       // No more handling to be done here!
    }
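
    // The precise test emitted below amounts to this dispatch, in
    // pseudocode (valid only while each handler klass is loaded,
    // non-interface, and has no subklasses):
    //   if (ex->klass() == H1_klass) goto handler1;
    //   if (ex->klass() == H2_klass) goto handler2;
    //   ... otherwise fall off to the Rethrow VM call.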

    // %%% The following logic replicates make_from_klass_unique.
    // TO DO:  Replace by a subroutine call.  Then generalize
    // the type check, as noted in the next "%%%" comment.

    ciInstanceKlass* klass = handler->catch_klass();
    if (UseUniqueSubclasses) {
      // (We use make_from_klass because it respects UseUniqueSubclasses.)
      const TypeOopPtr* tp = TypeOopPtr::make_from_klass(klass);
      klass = tp->klass()->as_instance_klass();
    }

    // Get the handler's klass
    if (!klass->is_loaded())    // klass is not loaded?
      break;                    // Must call Rethrow!
    if (klass->is_interface())  // should not happen, but...
      break;                    // bail out
    // See if the loaded exception klass has no subtypes
    if (klass->has_subklass())
      break;                    // Cannot easily do precise test ==> Rethrow

    // %%% Now that subclass checking is very fast, we need to rewrite
    // this section and remove the option "DeutschShiffmanExceptions".
    // The exception processing chain should be a normal typecase pattern,
    // with a bailout to the interpreter only in the case of unloaded
    // classes.  (The bailout should mark the method non-entrant.)
    // This rewrite should be placed in GraphKit::, not Parse::.

    // Add a dependence; if any subclass added we need to recompile
    // %%% should use stronger assert_unique_concrete_subtype instead
    if (!klass->is_final()) {
      C->dependencies()->assert_leaf_type(klass);
    }

    // Implement precise test
    const TypeKlassPtr *tk = TypeKlassPtr::make(klass);
    Node* con = _gvn.makecon(tk);
    Node* cmp = _gvn.transform( new (C, 3) CmpPNode(ex_klass_node, con) );
    Node* bol = _gvn.transform( new (C, 2) BoolNode(cmp, BoolTest::ne) );
    { BuildCutout unless(this, bol, PROB_LIKELY(0.7f));
      const TypeInstPtr* tinst = TypeInstPtr::make_exact(TypePtr::NotNull, klass);
      Node* ex_oop = _gvn.transform(new (C, 2) CheckCastPPNode(control(), ex_node, tinst));
      push_ex_oop(ex_oop);      // Push exception oop for handler
#ifndef PRODUCT
      if (PrintOpto && WizardMode) {
        tty->print("  Catching inline exception bci:%d -> handler_bci:%d -- ", bci(), handler_bci);
        klass->print_name();
        tty->cr();
      }
#endif
      merge_exception(handler_bci);
    }

    // Come here if exception does not match handler.
    // Carry on with more handler checks.
    --remaining;
  }

  assert(!stopped(), "you should return if you finish the chain");

  if (remaining == 1) {
    // Further checks do not matter.
  }

  if (can_rerun_bytecode()) {
    // Do not push_ex_oop here!
    // Re-executing the bytecode will reproduce the throwing condition.
    bool must_throw = true;
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none,
                  (ciKlass*)NULL, (const char*)NULL, // default args
                  must_throw);
    return;
  }

  // Oops, need to call into the VM to resolve the klasses at runtime.
  // Note:  This call must not deoptimize, since it is not a real call at this bci!
  kill_dead_locals();

  make_runtime_call(RC_NO_LEAF | RC_MUST_THROW,
                    OptoRuntime::rethrow_Type(),
                    OptoRuntime::rethrow_stub(),
                    NULL, NULL,
                    ex_node);

  // Rethrow is a pure call, no side effects, only a result.
  // The result cannot be allocated, so we use I_O

  // Catch exceptions from the rethrow
  catch_call_exceptions(handlers);
}


// (Note:  Moved add_debug_info into GraphKit::add_safepoint_edges.)
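
// (count_compiled_calls below is non-product bookkeeping: with
// -XX:+CountCompiledCalls it classifies each call site as inlined vs.
// out-of-line, per invoke bytecode, bumping counters that SharedRuntime's
// call statistics report at VM exit.)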


#ifndef PRODUCT
void Parse::count_compiled_calls(bool at_method_entry, bool is_inline) {
  if( CountCompiledCalls ) {
    if( at_method_entry ) {
      // bump invocation counter if top method (for statistics)
      if (CountCompiledCalls && depth() == 1) {
        const TypeInstPtr* addr_type = TypeInstPtr::make(method());
        Node* adr1 = makecon(addr_type);
        Node* adr2 = basic_plus_adr(adr1, adr1, in_bytes(methodOopDesc::compiled_invocation_counter_offset()));
        increment_counter(adr2);
      }
    } else if (is_inline) {
      switch (bc()) {
      case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_inlined_calls_addr()); break;
      case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_inlined_interface_calls_addr()); break;
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokedynamic:
      case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_inlined_static_calls_addr()); break;
      default: fatal("unexpected call bytecode");
      }
    } else {
      switch (bc()) {
      case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_normal_calls_addr()); break;
      case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_interface_calls_addr()); break;
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokedynamic:
      case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_static_calls_addr()); break;
      default: fatal("unexpected call bytecode");
      }
    }
  }
}
#endif //PRODUCT


// Identify possible target method and inlining style
ciMethod* Parse::optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
                                   ciMethod *dest_method, const TypeOopPtr* receiver_type) {
  // only use for virtual or interface calls

  // If it is obviously final, do not bother to call find_monomorphic_target,
  // because the class hierarchy checks are not needed, and may fail due to
  // incompletely loaded classes.  Since we do our own class loading checks
  // in this module, we may confidently bind to any method.
  if (dest_method->can_be_statically_bound()) {
    return dest_method;
  }

  // Attempt to improve the receiver
  bool actual_receiver_is_exact = false;
  ciInstanceKlass* actual_receiver = klass;
  if (receiver_type != NULL) {
    // Array methods are all inherited from Object, and are monomorphic.
    if (receiver_type->isa_aryptr() &&
        dest_method->holder() == env()->Object_klass()) {
      return dest_method;
    }

    // All other interesting cases are instance klasses.
    if (!receiver_type->isa_instptr()) {
      return NULL;
    }

    ciInstanceKlass *ikl = receiver_type->klass()->as_instance_klass();
    if (ikl->is_loaded() && ikl->is_initialized() && !ikl->is_interface() &&
        (ikl == actual_receiver || ikl->is_subtype_of(actual_receiver))) {
      // ikl is the same or a better type than the original actual_receiver,
      // e.g. a static receiver from bytecodes.
      actual_receiver = ikl;
      // Is the actual_receiver exact?
      actual_receiver_is_exact = receiver_type->klass_is_exact();
    }
  }

  ciInstanceKlass*   calling_klass = caller->holder();
  ciMethod* cha_monomorphic_target = dest_method->find_monomorphic_target(calling_klass, klass, actual_receiver);
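
  // Example: if abstract class A has exactly one concrete subclass C loaded,
  // CHA can return C::m as the unique target here.  The dependency recorded
  // below makes the compiled code deoptimize if class loading later adds a
  // second implementor.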
  if (cha_monomorphic_target != NULL) {
    assert(!cha_monomorphic_target->is_abstract(), "");
    // Look at the method-receiver type.  Does it add "too much information"?
    ciKlass*    mr_klass = cha_monomorphic_target->holder();
    const Type* mr_type  = TypeInstPtr::make(TypePtr::BotPTR, mr_klass);
    if (receiver_type == NULL || !receiver_type->higher_equal(mr_type)) {
      // Calling this method would include an implicit cast to its holder.
      // %%% Not yet implemented.  Would throw minor asserts at present.
      // %%% The most common wins are already gained by +UseUniqueSubclasses.
      // To fix, put the higher_equal check at the call of this routine,
      // and add a CheckCastPP to the receiver.
      if (TraceDependencies) {
        tty->print_cr("found unique CHA method, but could not cast up");
        tty->print("  method  = ");
        cha_monomorphic_target->print();
        tty->cr();
      }
      if (C->log() != NULL) {
        C->log()->elem("missed_CHA_opportunity klass='%d' method='%d'",
                       C->log()->identify(klass),
                       C->log()->identify(cha_monomorphic_target));
      }
      cha_monomorphic_target = NULL;
    }
  }
  if (cha_monomorphic_target != NULL) {
    // Hardwiring a virtual.
    // If we inlined because CHA revealed only a single target method,
    // then we are dependent on that target method not getting overridden
    // by dynamic class loading.  Be sure to test the "static" receiver
    // dest_method here, as opposed to the actual receiver, which may
    // falsely lead us to believe that the receiver is final or private.
    C->dependencies()->assert_unique_concrete_method(actual_receiver, cha_monomorphic_target);
    return cha_monomorphic_target;
  }

  // If the type is exact, we can still bind the method w/o a vcall.
  // (This case comes after CHA so we can see how much extra work it does.)
  if (actual_receiver_is_exact) {
    // In case of evolution, there is a dependence on every inlined method,
    // since each such method can be changed when its class is redefined.
    ciMethod* exact_method = dest_method->resolve_invoke(calling_klass, actual_receiver);
    if (exact_method != NULL) {
#ifndef PRODUCT
      if (PrintOpto) {
        tty->print("  Calling method via exact type @%d --- ", bci);
        exact_method->print_name();
        tty->cr();
      }
#endif
      return exact_method;
    }
  }

  return NULL;
}