/*
 * Copyright 2000-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_callGenerator.cpp.incl"

CallGenerator::CallGenerator(ciMethod* method) {
  _method = method;
}

// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
private:
  bool  _is_osr;
  float _expected_uses;

public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(can_parse(method, is_osr), "parse must be possible");
  }

  // Can we build either an OSR or a regular parser for this method?
  static bool can_parse(ciMethod* method, int is_osr = false);

  virtual bool      is_parse() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
  int is_osr() { return _is_osr; }
};

JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  // Grab signature for matching/allocation
#ifdef ASSERT
  if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
    MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
    assert(C->env()->system_dictionary_modification_counter_changed(),
           "Must invalidate if TypeFuncs differ");
  }
#endif

  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}
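// Usage note (illustrative sketch, not compiled): every client of a
// CallGenerator must be prepared for generate() to return NULL, which
// signals that the compile is bailing out:
//
//   CallGenerator* cg = CallGenerator::for_inline(callee, expected_uses);
//   if (cg != NULL) {
//     JVMState* new_jvms = cg->generate(jvms);
//     if (new_jvms == NULL)  return;   // bailout; discard any partial graph
//   }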
//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
public:
  DirectCallGenerator(ciMethod* method)
    : CallGenerator(method)
  {
  }
  virtual JVMState* generate(JVMState* jvms);
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode* call = new (kit.C, tf()->domain()->cnt()) CallStaticJavaNode(tf(), target, method(), kit.bci());
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_invoke())
      call->set_method_handle_invoke(true);
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}
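// Illustrative sketch (hypothetical client code, not compiled): statics and
// statically bindable instance methods both go through for_direct_call; the
// generator itself picks the resolution stub from the method's flags:
//
//   CallGenerator* cg = CallGenerator::for_direct_call(m);
//   // m->is_static()  -> resolve_static_call stub, no receiver check
//   // !m->is_static() -> resolve_opt_virtual_call stub + explicit null check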
//---------------------------DynamicCallGenerator-----------------------------
// Internal class which handles all out-of-line dynamic calls.
class DynamicCallGenerator : public CallGenerator {
public:
  DynamicCallGenerator(ciMethod* method)
    : CallGenerator(method)
  {
  }
  virtual JVMState* generate(JVMState* jvms);
};

JVMState* DynamicCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("dynamic_call bci='%d'", jvms->bci());
  }

  // Get the constant pool cache from the caller class.
  ciMethod* caller_method = jvms->method();
  ciBytecodeStream str(caller_method);
  str.force_bci(jvms->bci());  // Set the stream to the invokedynamic bci.
  assert(str.cur_bc() == Bytecodes::_invokedynamic, "wrong place to issue a dynamic call!");
  ciCPCache* cpcache = str.get_cpcache();

  // Get the offset of the CallSite from the constant pool cache
  // pointer.
  int index = str.get_method_index();
  size_t call_site_offset = cpcache->get_f1_offset(index);

  // Load the CallSite object from the constant pool cache.
  const TypeOopPtr* cpcache_ptr = TypeOopPtr::make_from_constant(cpcache);
  Node* cpc = kit.makecon(cpcache_ptr);
  Node* adr = kit.basic_plus_adr(cpc, cpc, call_site_offset);
  Node* call_site = kit.make_load(kit.control(), adr, TypeInstPtr::BOTTOM, T_OBJECT, Compile::AliasIdxRaw);

  // Load the MethodHandle (target) from the CallSite object.
  Node* mh_adr = kit.basic_plus_adr(call_site, call_site, java_dyn_CallSite::target_offset_in_bytes());
  Node* mh = kit.make_load(kit.control(), mh_adr, TypeInstPtr::BOTTOM, T_OBJECT);

  address stub = SharedRuntime::get_resolve_opt_virtual_call_stub();

  CallStaticJavaNode* call = new (kit.C, tf()->domain()->cnt()) CallStaticJavaNode(tf(), stub, method(), kit.bci());
  // invokedynamic is treated as an optimized invokevirtual.
  call->set_optimized_virtual(true);
  // Take extra care (in the presence of argument motion) not to trash the SP:
  call->set_method_handle_invoke(true);

  // Pass the MethodHandle as first argument and shift the other
  // arguments.
  call->init_req(0 + TypeFunc::Parms, mh);
  uint nargs = call->method()->arg_size();
  for (uint i = 1; i < nargs; i++) {
    Node* arg = kit.argument(i - 1);
    call->init_req(i + TypeFunc::Parms, arg);
  }

  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}
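// Worked example (illustrative): with arg_size() == 3 (one slot for the
// MethodHandle itself plus two stack arguments a0, a1), the loop above
// wires the call's parameter edges as
//
//   call->in(TypeFunc::Parms + 0) = mh   // the CallSite's current target
//   call->in(TypeFunc::Parms + 1) = a0   // kit.argument(0)
//   call->in(TypeFunc::Parms + 2) = a1   // kit.argument(1)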
//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
private:
  int _vtable_index;
public:
  VirtualCallGenerator(ciMethod* method, int vtable_index)
    : CallGenerator(method), _vtable_index(vtable_index)
  {
    assert(vtable_index == methodOopDesc::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it.  The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    kit.inc_sp(method()->arg_size());  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod* caller = kit.method();
  ciMethodData* caller_md = (caller == NULL) ? NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks ||
      ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
        >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == methodOopDesc::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode* call = new (kit.C, tf()->domain()->cnt()) CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call.  Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

bool ParseGenerator::can_parse(ciMethod* m, int is_osr) {
  // Certain methods cannot be parsed at all:
  if (!m->can_be_compiled())              return false;
  if (!m->has_balanced_monitors())        return false;
  if (m->get_flow_analysis()->failing())  return false;

  // (Methods may bail out for other reasons, after the parser is run.
  // We try to avoid this, but if forced, we must return (Node*)NULL.
  // The user of the CallGenerator must check for this condition.)
  return true;
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (!ParseGenerator::can_parse(m))  return NULL;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller.  Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (!ParseGenerator::can_parse(m, true))  return NULL;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m);
}

CallGenerator* CallGenerator::for_dynamic_call(ciMethod* m) {
  assert(m->is_method_handle_invoke(), "for_dynamic_call mismatch");
  return new DynamicCallGenerator(m);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_invoke(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index);
}
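// Illustrative sketch (hypothetical dispatch, not the actual policy code):
// a caller such as Compile::call_generator in doCall.cpp selects among
// these factories roughly along these lines:
//
//   if (m->is_method_handle_invoke())  cg = CallGenerator::for_dynamic_call(m);
//   else if (statically_bound)         cg = CallGenerator::for_direct_call(m);
//   else                               cg = CallGenerator::for_virtual_call(m, vtable_index);
//
// ("statically_bound" is a stand-in for the real bind-time test.)  When
// inlining looks profitable, CallGenerator::for_inline(m, uses) is preferred.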
//---------------------------WarmCallGenerator--------------------------------
// Internal class which handles initial deferral of inlining decisions.
class WarmCallGenerator : public CallGenerator {
  WarmCallInfo*  _call_info;
  CallGenerator* _if_cold;
  CallGenerator* _if_hot;
  bool           _is_virtual;  // caches virtuality of if_cold
  bool           _is_inline;   // caches inline-ness of if_hot

public:
  WarmCallGenerator(WarmCallInfo* ci,
                    CallGenerator* if_cold,
                    CallGenerator* if_hot)
    : CallGenerator(if_cold->method())
  {
    assert(method() == if_hot->method(), "consistent choices");
    _call_info  = ci;
    _if_cold    = if_cold;
    _if_hot     = if_hot;
    _is_virtual = if_cold->is_virtual();
    _is_inline  = if_hot->is_inline();
  }

  virtual bool is_inline()   const { return _is_inline; }
  virtual bool is_virtual()  const { return _is_virtual; }
  virtual bool is_deferred() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
                                            CallGenerator* if_cold,
                                            CallGenerator* if_hot) {
  return new WarmCallGenerator(ci, if_cold, if_hot);
}

JVMState* WarmCallGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  if (C->log() != NULL) {
    C->log()->elem("warm_call bci='%d'", jvms->bci());
  }
  jvms = _if_cold->generate(jvms);
  if (jvms != NULL) {
    Node* m = jvms->map()->control();
    if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
    if (m->is_Catch())     m = m->in(0);  else m = C->top();
    if (m->is_Proj())      m = m->in(0);  else m = C->top();
    if (m->is_CallJava()) {
      _call_info->set_call(m->as_Call());
      _call_info->set_hot_cg(_if_hot);
#ifndef PRODUCT
      if (PrintOpto || PrintOptoInlining) {
        tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
        tty->print("WCI: ");
        _call_info->print();
      }
#endif
      _call_info->set_heat(_call_info->compute_heat());
      C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
    }
  }
  return jvms;
}
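// Deferral protocol (illustrative sketch, not compiled): the out-of-line
// call is emitted immediately by _if_cold, and the WarmCallInfo is queued
// on the Compile object.  A later pass may then revisit the queue, hottest
// first, roughly along these lines:
//
//   for (WarmCallInfo* wci = C->warm_calls(); wci != NULL; wci = wci->next()) {
//     if (budget_remains)  wci->make_hot();   // replace the call by inlining
//     else                 wci->make_cold();  // keep the out-of-line call
//   }
//
// ("budget_remains" is a stand-in for whatever inlining policy applies.)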
void WarmCallInfo::make_hot() {
  Compile* C = Compile::current();
  // Replace the callnode with something better.
  CallJavaNode* call = this->call()->as_CallJava();
  ciMethod* method = call->method();
  int nargs = method->arg_size();
  JVMState* jvms = call->jvms()->clone_shallow(C);
  uint size = TypeFunc::Parms + MAX2(2, nargs);
  SafePointNode* map = new (C, size) SafePointNode(size, jvms);
  for (uint i1 = 0; i1 < (uint)(TypeFunc::Parms + nargs); i1++) {
    map->init_req(i1, call->in(i1));
  }
  jvms->set_map(map);
  jvms->set_offsets(map->req());
  jvms->set_locoff(TypeFunc::Parms);
  jvms->set_stkoff(TypeFunc::Parms);
  GraphKit kit(jvms);

  JVMState* new_jvms = _hot_cg->generate(kit.jvms());
  if (new_jvms == NULL)  return;  // no change
  if (C->failing())      return;

  kit.set_jvms(new_jvms);
  Node* res = C->top();
  int   res_size = method->return_type()->size();
  if (res_size != 0) {
    kit.inc_sp(-res_size);
    res = kit.argument(0);
  }
  GraphKit ekit(kit.combine_and_pop_all_exception_states()->jvms());

  // Replace the call:
  for (DUIterator i = call->outs(); call->has_out(i); i++) {
    Node* n = call->out(i);
    Node* nn = NULL;  // replacement
    if (n->is_Proj()) {
      ProjNode* nproj = n->as_Proj();
      assert(nproj->_con < (uint)(TypeFunc::Parms + (res_size ? 1 : 0)), "sane proj");
      if (nproj->_con == TypeFunc::Parms) {
        nn = res;
      } else {
        nn = kit.map()->in(nproj->_con);
      }
      if (nproj->_con == TypeFunc::I_O) {
        for (DUIterator j = nproj->outs(); nproj->has_out(j); j++) {
          Node* e = nproj->out(j);
          if (e->Opcode() == Op_CreateEx) {
            e->replace_by(ekit.argument(0));
          } else if (e->Opcode() == Op_Catch) {
            for (DUIterator k = e->outs(); e->has_out(k); k++) {
              CatchProjNode* p = e->out(k)->as_CatchProj();
              if (p->is_handler_proj()) {
                p->replace_by(ekit.control());
              } else {
                p->replace_by(kit.control());
              }
            }
          }
        }
      }
    }
    NOT_PRODUCT(if (!nn)  n->dump(2));
    assert(nn != NULL, "don't know what to do with this user");
    n->replace_by(nn);
  }
}

void WarmCallInfo::make_cold() {
  // No action:  Just dequeue.
}


//------------------------PredictedCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;

public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove the extreme values from the range.
    if (hit_prob > PROB_MAX)  hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN)  hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
  }

  virtual bool is_virtual()  const { return true; }
  virtual bool is_inline()   const { return _if_hit->is_inline(); }
  virtual bool is_deferred() const { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit, hit_prob);
}
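// Shape of the emitted code (illustrative): a predicted call compiles to a
// type-check diamond, roughly
//
//   if (receiver->klass() == _predicted_receiver) {
//     ... _if_hit path (typically inlined) ...
//   } else {
//     ... _if_missed path (typically a virtual call or uncommon trap) ...
//   }
//
// with the two arms merged by the Region/Phi logic at the end of
// PredictedCallGenerator::generate below.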
JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_call bci='%d' klass='%d'",
              jvms->bci(), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  Node* exact_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = kit.type_check_receiver(receiver,
                                           _predicted_receiver, _hit_prob,
                                           &exact_receiver);

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      assert(slow_jvms != NULL, "miss path must not fail to generate");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // Instance does not exactly match the desired type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Fall through if the instance exactly matches the desired type.
  kit.replace_in_map(receiver, exact_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Finish the diamond.
  kit.C->set_has_split_ifs(true);  // Has chance for split-if optimization
  RegionNode* region = new (kit.C, 3) RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}
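// Merge loop, worked example (illustrative): suppose the fast path leaves
// value f and the slow path value s in the same map slot i.  Since m != n,
// the loop above builds
//
//   Node* phi = PhiNode::make(region, f, t);  // input 1: fast path
//   phi->set_req(2, s);                       // input 2: slow path
//
// where t = type(f) meet type(s), and installs the transformed phi back
// into slot i, so downstream code sees a single merged value.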
//-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which replaces a call with an unconditional uncommon trap.
class UncommonTrapCallGenerator : public CallGenerator {
  Deoptimization::DeoptReason _reason;
  Deoptimization::DeoptAction _action;

public:
  UncommonTrapCallGenerator(ciMethod* m,
                            Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action)
    : CallGenerator(m)
  {
    _reason = reason;
    _action = action;
  }

  virtual bool is_virtual() const { ShouldNotReachHere(); return false; }
  virtual bool is_trap() const    { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator*
CallGenerator::for_uncommon_trap(ciMethod* m,
                                 Deoptimization::DeoptReason reason,
                                 Deoptimization::DeoptAction action) {
  return new UncommonTrapCallGenerator(m, reason, action);
}


JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
  int nargs = method()->arg_size();
  kit.inc_sp(nargs);
  assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
  if (_reason == Deoptimization::Reason_class_check &&
      _action == Deoptimization::Action_maybe_recompile) {
    // Temp fix for 6529811
    // Don't allow uncommon_trap to override our decision to recompile in the event
    // of a class cast failure for a monomorphic call as it will never let us convert
    // the call to either bi-morphic or megamorphic and can lead to unc-trap loops
    bool keep_exact_action = true;
    kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
  } else {
    kit.uncommon_trap(_reason, _action);
  }
  return kit.transfer_exceptions_into_jvms();
}

// (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)

// (Note:  Merged hook_up_exits into ParseGenerator::generate.)

#define NODES_OVERHEAD_PER_METHOD (30.0)
#define NODES_PER_BYTECODE (9.5)

void WarmCallInfo::init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor) {
  int call_count = profile.count();
  int code_size = call_method->code_size();

  // Expected execution count is based on the historical count:
  _count = call_count < 0 ? 1 : call_site->method()->scale_count(call_count, prof_factor);

  // Expected profit from inlining, in units of simple call-overheads.
  _profit = 1.0;

  // Expected work performed by the call in units of call-overheads.
  // %%% need an empirical curve fit for "work" (time in call)
  float bytecodes_per_call = 3;
  _work = 1.0 + code_size / bytecodes_per_call;

  // Expected size of compilation graph:
  // -XX:+PrintParseStatistics once reported:
  //   Methods seen: 9184  Methods parsed: 9184  Nodes created: 1582391
  //   Histogram of 144298 parsed bytecodes:
  // %%% Need a better predictor for graph size.
  _size = NODES_OVERHEAD_PER_METHOD + (NODES_PER_BYTECODE * code_size);
}

// is_cold:  Return true if the node should never be inlined.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_cold() const {
  if (count()  < WarmCallMinCount)   return true;
  if (profit() < WarmCallMinProfit)  return true;
  if (work()   > WarmCallMaxWork)    return true;
  if (size()   > WarmCallMaxSize)    return true;
  return false;
}
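// Worked example (illustrative numbers): for a callee with code_size == 20
// bytecodes, init() above estimates
//
//   _work = 1.0 + 20 / 3.0        ~=   7.7 call-overheads
//   _size = 30.0 + 9.5 * 20        = 220.0 graph nodes
//
// These estimates feed the is_cold() cutoffs above and the is_hot() /
// compute_heat() logic below; the thresholds (WarmCallMinCount,
// WarmCallMaxSize, etc.) are tunable VM flags.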
// is_hot:  Return true if the node should be inlined immediately.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_hot() const {
  assert(!is_cold(), "eliminate is_cold cases before testing is_hot");
  if (count()  >= HotCallCountThreshold)   return true;
  if (profit() >= HotCallProfitThreshold)  return true;
  if (work()   <= HotCallTrivialWork)      return true;
  if (size()   <= HotCallTrivialSize)      return true;
  return false;
}

// compute_heat:  Rank a warm (neither hot nor cold) call for the queue.
float WarmCallInfo::compute_heat() const {
  assert(!is_cold(), "compute heat only on warm nodes");
  assert(!is_hot(),  "compute heat only on warm nodes");
  int min_size = MAX2(0,   (int)HotCallTrivialSize);
  int max_size = MIN2(500, (int)WarmCallMaxSize);
  float method_size = (size() - min_size) / MAX2(1, max_size - min_size);
  float size_factor;
  if      (method_size < 0.05)  size_factor = 4;    // 2 sigmas better than avg.
  else if (method_size < 0.15)  size_factor = 2;    // 1 sigma better than avg.
  else if (method_size < 0.5)   size_factor = 1;    // better than avg.
  else                          size_factor = 0.5;  // worse than avg.
  return (count() * profit() * size_factor);
}

bool WarmCallInfo::warmer_than(WarmCallInfo* that) {
  assert(this != that, "compare only different WCIs");
  assert(this->heat() != 0 && that->heat() != 0, "call compute_heat 1st");
  if (this->heat() > that->heat())  return true;
  if (this->heat() < that->heat())  return false;
  assert(this->heat() == that->heat(), "no NaN heat allowed");
  // Equal heat.  Break the tie some other way.
  if (!this->call() || !that->call())  return (address)this > (address)that;
  return this->call()->_idx > that->call()->_idx;
}

//#define UNINIT_NEXT ((WarmCallInfo*)badAddress)
#define UNINIT_NEXT ((WarmCallInfo*)NULL)

WarmCallInfo* WarmCallInfo::insert_into(WarmCallInfo* head) {
  assert(next() == UNINIT_NEXT, "not yet on any list");
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != NULL && next_p->warmer_than(this)) {
    prev_p = next_p;
    next_p = prev_p->next();
  }
  // Install this between prev_p and next_p.
  this->set_next(next_p);
  if (prev_p == NULL)
    head = this;
  else
    prev_p->set_next(this);
  return head;
}
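// Worked example (illustrative): if the queue currently holds WCIs with
// heats [9.0, 7.0, 3.0] and we insert one with heat 5.0,
//
//   head = wci5->insert_into(head);  // walks past 9.0 and 7.0
//
// leaves the queue as [9.0, 7.0, 5.0, 3.0].  Hottest-first order is an
// invariant maintained by the warmer_than() walk above, so a consumer of
// Compile::warm_calls() can stop at the first entry that no longer fits
// its inlining budget.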
WarmCallInfo* WarmCallInfo::remove_from(WarmCallInfo* head) {
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != this) {
    assert(next_p != NULL, "this must be in the list somewhere");
    prev_p = next_p;
    next_p = prev_p->next();
  }
  next_p = this->next();
  debug_only(this->set_next(UNINIT_NEXT));
  // Remove this from between prev_p and next_p.
  if (prev_p == NULL)
    head = next_p;
  else
    prev_p->set_next(next_p);
  return head;
}

WarmCallInfo* WarmCallInfo::_always_hot  = NULL;
WarmCallInfo* WarmCallInfo::_always_cold = NULL;

WarmCallInfo* WarmCallInfo::always_hot() {
  if (_always_hot == NULL) {
    static double bits[sizeof(WarmCallInfo) / sizeof(double) + 1] = {0};
    WarmCallInfo* ci = (WarmCallInfo*) bits;
    ci->_profit = ci->_count = MAX_VALUE();
    ci->_work   = ci->_size  = MIN_VALUE();
    _always_hot = ci;
  }
  assert(_always_hot->is_hot(), "must always be hot");
  return _always_hot;
}

WarmCallInfo* WarmCallInfo::always_cold() {
  if (_always_cold == NULL) {
    static double bits[sizeof(WarmCallInfo) / sizeof(double) + 1] = {0};
    WarmCallInfo* ci = (WarmCallInfo*) bits;
    ci->_profit = ci->_count = MIN_VALUE();
    ci->_work   = ci->_size  = MAX_VALUE();
    _always_cold = ci;
  }
  assert(_always_cold->is_cold(), "must always be cold");
  return _always_cold;
}


#ifndef PRODUCT

void WarmCallInfo::print() const {
  tty->print("%s : C=%6.1f P=%6.1f W=%6.1f S=%6.1f H=%6.1f -> %p",
             is_cold() ? "cold" : is_hot() ? "hot " : "warm",
             count(), profit(), work(), size(), compute_heat(), next());
  tty->cr();
  if (call() != NULL)  call()->dump();
}

void print_wci(WarmCallInfo* ci) {
  ci->print();
}

void WarmCallInfo::print_all() const {
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    p->print();
}

int WarmCallInfo::count_all() const {
  int cnt = 0;
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    cnt++;
  return cnt;
}

#endif //PRODUCT