/*
 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_callGenerator.cpp.incl"

CallGenerator::CallGenerator(ciMethod* method) {
  _method = method;
}

// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
private:
  bool  _is_osr;
  float _expected_uses;

public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(can_parse(method, is_osr), "parse must be possible");
  }

  // Can we build either an OSR or a regular parser for this method?
  static bool can_parse(ciMethod* method, int is_osr = false);

  virtual bool      is_parse() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
  int is_osr() { return _is_osr; }
};

JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  // Grab signature for matching/allocation
#ifdef ASSERT
  if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
    MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
    assert(C->env()->system_dictionary_modification_counter_changed(),
           "Must invalidate if TypeFuncs differ");
  }
#endif

  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

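// (Editorial note on the CallGenerator contract, not code from this file:
// generate() consumes the caller's JVMState at the call site and returns
// the JVMState after the call, with exception states transferred in; a
// NULL return means the generator bailed out and the caller must fall back
// to another strategy.  A hypothetical client:
//
//   CallGenerator* cg = CallGenerator::for_inline(callee, expected_uses);
//   if (cg != NULL) {
//     JVMState* new_jvms = cg->generate(jvms);
//     if (new_jvms == NULL) {
//       // Inlining bailed out; retry with an out-of-line call.
//       cg = CallGenerator::for_direct_call(callee);
//       new_jvms = cg->generate(jvms);
//     }
//   }
// )
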
//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
 private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool _separate_io_proj;

 public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _call_node(NULL),  // initialize: generate() may bail out before setting it
      _separate_io_proj(separate_io_proj)
  {
  }
  virtual JVMState* generate(JVMState* jvms);

  CallStaticJavaNode* call_node() const { return _call_node; }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode* call = new (kit.C, tf()->domain()->cnt()) CallStaticJavaNode(tf(), target, method(), kit.bci());
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_invoke()) {
      call->set_method_handle_invoke(true);
      kit.C->set_has_method_handle_invokes(true);
    }
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  _call_node = call;  // Save the call node in case we need it later
  return kit.transfer_exceptions_into_jvms();
}

//---------------------------DynamicCallGenerator-----------------------------
// Internal class which handles all out-of-line invokedynamic calls.
class DynamicCallGenerator : public CallGenerator {
public:
  DynamicCallGenerator(ciMethod* method)
    : CallGenerator(method)
  {
  }
  virtual JVMState* generate(JVMState* jvms);
};

JVMState* DynamicCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("dynamic_call bci='%d'", jvms->bci());
  }

  // Get the constant pool cache from the caller class.
  ciMethod* caller_method = jvms->method();
  ciBytecodeStream str(caller_method);
  str.force_bci(jvms->bci());  // Set the stream to the invokedynamic bci.
  assert(str.cur_bc() == Bytecodes::_invokedynamic, "wrong place to issue a dynamic call!");
  ciCPCache* cpcache = str.get_cpcache();

  // Get the offset of the CallSite from the constant pool cache
  // pointer.
  int index = str.get_method_index();
  size_t call_site_offset = cpcache->get_f1_offset(index);

  // Load the CallSite object from the constant pool cache.
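  // (Editorial note, assuming the constant-pool-cache layout of this VM
  // generation: each invokedynamic entry caches its java.dyn.CallSite
  // instance in the entry's f1 field, so the load below is essentially
  //   call_site = *(oop*)(cpcache + f1_offset_of(index))
  // expressed as ideal-graph nodes.)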
  const TypeOopPtr* cpcache_ptr = TypeOopPtr::make_from_constant(cpcache);
  Node* cpcache_adr = kit.makecon(cpcache_ptr);
  Node* call_site_adr = kit.basic_plus_adr(cpcache_adr, cpcache_adr, call_site_offset);
  Node* call_site = kit.make_load(kit.control(), call_site_adr, TypeInstPtr::BOTTOM, T_OBJECT, Compile::AliasIdxRaw);

  // Load the target MethodHandle from the CallSite object.
  Node* target_mh_adr = kit.basic_plus_adr(call_site, call_site, java_dyn_CallSite::target_offset_in_bytes());
  Node* target_mh = kit.make_load(kit.control(), target_mh_adr, TypeInstPtr::BOTTOM, T_OBJECT);

  address resolve_stub = SharedRuntime::get_resolve_opt_virtual_call_stub();

  CallStaticJavaNode* call = new (kit.C, tf()->domain()->cnt()) CallStaticJavaNode(tf(), resolve_stub, method(), kit.bci());
  // invokedynamic is treated as an optimized invokevirtual.
  call->set_optimized_virtual(true);
  // Take extra care (in the presence of argument motion) not to trash the SP:
  call->set_method_handle_invoke(true);
  kit.C->set_has_method_handle_invokes(true);

  // Pass the target MethodHandle as first argument and shift the
  // other arguments.
  call->init_req(0 + TypeFunc::Parms, target_mh);
  uint nargs = call->method()->arg_size();
  for (uint i = 1; i < nargs; i++) {
    Node* arg = kit.argument(i - 1);
    call->init_req(i + TypeFunc::Parms, arg);
  }

  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
private:
  int _vtable_index;
public:
  VirtualCallGenerator(ciMethod* method, int vtable_index)
    : CallGenerator(method), _vtable_index(vtable_index)
  {
    assert(vtable_index == methodOopDesc::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it.  The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    kit.inc_sp(method()->arg_size());  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod*     caller    = kit.method();
  ciMethodData* caller_md = (caller == NULL) ? NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks ||
      ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
        >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == methodOopDesc::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches ");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode* call = new (kit.C, tf()->domain()->cnt()) CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call.  Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

// (The parameter is named is_osr to match the declaration; it is used as a
// flag, not a bci.)
bool ParseGenerator::can_parse(ciMethod* m, int is_osr) {
  // Certain methods cannot be parsed at all:
  if (!m->can_be_compiled())              return false;
  if (!m->has_balanced_monitors())        return false;
  if (m->get_flow_analysis()->failing())  return false;

  // (Methods may bail out for other reasons, after the parser is run.
  // We try to avoid this, but if forced, we must return (Node*)NULL.
  // The user of the CallGenerator must check for this condition.)
  return true;
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (!ParseGenerator::can_parse(m))  return NULL;
  return new ParseGenerator(m, expected_uses);
}

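// (Editorial sketch: these factories are meant to be composed by the
// inlining policy.  For a monomorphic virtual site, a hypothetical
// combination would be
//
//   CallGenerator* hit    = CallGenerator::for_inline(target, uses);
//   CallGenerator* missed = CallGenerator::for_virtual_call(target, vtable_index);
//   CallGenerator* cg     = CallGenerator::for_predicted_call(klass, missed, hit, prob);
//
// so a receiver-type check guards the inlined fast path and falls back
// to a real virtual dispatch.)
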
// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller.  Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (!ParseGenerator::can_parse(m, true))  return NULL;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_dynamic_call(ciMethod* m) {
  assert(m->is_method_handle_invoke(), "for_dynamic_call mismatch");
  return new DynamicCallGenerator(m);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_invoke(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index);
}

// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
  CallGenerator* _inline_cg;

 public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    DirectCallGenerator(method, true), _inline_cg(inline_cg) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  JVMState* generate(JVMState* jvms) {
    // Record that this call site should be revisited once the main
    // parse is finished.
    Compile::current()->add_late_inline(this);

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }
};


void LateInlineCallGenerator::do_late_inline() {
  // Can't inline it if the call node is gone or malformed.
  if (call_node() == NULL || call_node()->outcnt() == 0 ||
      call_node()->in(0) == NULL || call_node()->in(0)->is_top())
    return;

  CallStaticJavaNode* call = call_node();

  // Make a clone of the JVMState that is appropriate for driving a parse.
  Compile* C = Compile::current();
  JVMState* jvms = call->jvms()->clone_shallow(C);
  uint size = call->req();
  SafePointNode* map = new (C, size) SafePointNode(size, jvms);
  for (uint i1 = 0; i1 < size; i1++) {
    map->init_req(i1, call->in(i1));
  }

  // Make sure the state is a MergeMem for parsing.
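  // (Editorial note: the parser addresses memory one alias category at a
  // time, so it expects the memory state as a MergeMem whose slices can be
  // updated independently; a bare memory node is wrapped here before
  // parsing begins.)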
  if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
    map->set_req(TypeFunc::Memory, MergeMemNode::make(C, map->in(TypeFunc::Memory)));
  }

  // Make enough space for the expression stack and transfer the incoming arguments.
  int nargs = method()->arg_size();
  jvms->set_map(map);
  map->ensure_stack(jvms, jvms->method()->max_stack());
  if (nargs > 0) {
    for (int i1 = 0; i1 < nargs; i1++) {
      map->set_req(i1 + jvms->argoff(), call->in(TypeFunc::Parms + i1));
    }
  }

  CompileLog* log = C->log();
  if (log != NULL) {
    log->head("late_inline method='%d'", log->identify(method()));
    JVMState* p = jvms;
    while (p != NULL) {
      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
      p = p->caller();
    }
    log->tail("late_inline");
  }

  // Set up default node notes to be picked up by the inlining.
  Node_Notes* old_nn = C->default_node_notes();
  if (old_nn != NULL) {
    Node_Notes* entry_nn = old_nn->clone(C);
    entry_nn->set_jvms(jvms);
    C->set_default_node_notes(entry_nn);
  }

  // Now perform the inlining using the synthesized JVMState.
  JVMState* new_jvms = _inline_cg->generate(jvms);
  if (new_jvms == NULL)  return;  // no change
  if (C->failing())      return;

  // Capture any exceptional control flow.
  GraphKit kit(new_jvms);

  // Find the result object.
  Node* result = C->top();
  int result_size = method()->return_type()->size();
  if (result_size != 0 && !kit.stopped()) {
    result = (result_size == 1) ? kit.pop() : kit.pop_pair();
  }

  kit.replace_call(call, result);
}


CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}


//---------------------------WarmCallGenerator--------------------------------
// Internal class which handles initial deferral of inlining decisions.
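// A warm call is one that is neither clearly hot (inline now) nor clearly
// cold (never inline): it is emitted as its cold variant first and queued
// on the compile's warm-call list with a computed "heat", so that it can
// later be upgraded to the hot (inlined) variant if the budget allows.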
class WarmCallGenerator : public CallGenerator {
  WarmCallInfo*   _call_info;
  CallGenerator*  _if_cold;
  CallGenerator*  _if_hot;
  bool            _is_virtual;   // caches virtuality of if_cold
  bool            _is_inline;    // caches inline-ness of if_hot

public:
  WarmCallGenerator(WarmCallInfo* ci,
                    CallGenerator* if_cold,
                    CallGenerator* if_hot)
    : CallGenerator(if_cold->method())
  {
    assert(method() == if_hot->method(), "consistent choices");
    _call_info  = ci;
    _if_cold    = if_cold;
    _if_hot     = if_hot;
    _is_virtual = if_cold->is_virtual();
    _is_inline  = if_hot->is_inline();
  }

  virtual bool is_inline()   const { return _is_inline; }
  virtual bool is_virtual()  const { return _is_virtual; }
  virtual bool is_deferred() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
                                            CallGenerator* if_cold,
                                            CallGenerator* if_hot) {
  return new WarmCallGenerator(ci, if_cold, if_hot);
}

JVMState* WarmCallGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  if (C->log() != NULL) {
    C->log()->elem("warm_call bci='%d'", jvms->bci());
  }
  jvms = _if_cold->generate(jvms);
  if (jvms != NULL) {
    Node* m = jvms->map()->control();
    if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
    if (m->is_Catch())     m = m->in(0);  else m = C->top();
    if (m->is_Proj())      m = m->in(0);  else m = C->top();
    if (m->is_CallJava()) {
      _call_info->set_call(m->as_Call());
      _call_info->set_hot_cg(_if_hot);
#ifndef PRODUCT
      if (PrintOpto || PrintOptoInlining) {
        tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
        tty->print("WCI: ");
        _call_info->print();
      }
#endif
      _call_info->set_heat(_call_info->compute_heat());
      C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
    }
  }
  return jvms;
}

void WarmCallInfo::make_hot() {
  Unimplemented();
}

void WarmCallInfo::make_cold() {
  // No action:  Just dequeue.
}


//------------------------PredictedCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;

public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove the extreme values from the range.
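    // (Editorial note: PROB_MAX and PROB_MIN are C2's conventional
    // "almost always" / "almost never" branch probabilities; clamping to
    // them keeps the profile-derived frequency strictly inside (0,1).)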
    if (hit_prob > PROB_MAX)  hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN)  hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
  }

  virtual bool is_virtual()  const { return true; }
  virtual bool is_inline()   const { return _if_hit->is_inline(); }
  virtual bool is_deferred() const { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit, hit_prob);
}


JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_call bci='%d' klass='%d'",
              jvms->bci(), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  Node* exact_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = kit.type_check_receiver(receiver,
                                           _predicted_receiver, _hit_prob,
                                           &exact_receiver);

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      assert(slow_jvms != NULL, "miss path must not fail to generate");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // The instance does not exactly match the desired type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Fall through if the instance exactly matches the desired type.
  kit.replace_in_map(receiver, exact_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Finish the diamond.
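  // (Editorial sketch of the shape being built here:
  //
  //          receiver type check
  //            /             \
  //      hit (inlined)   miss (virtual call)
  //            \             /
  //          Region + Phis below
  //
  // Control, I/O, memory, and any live stack slots that differ between
  // the two paths are merged with a 2-input Region and matching Phis.)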
  kit.C->set_has_split_ifs(true);  // Has chance for split-if optimization
  RegionNode* region = new (kit.C, 3) RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


//------------------------PredictedDynamicCallGenerator-----------------------
// Internal class which handles out-of-line invokedynamic calls guarded by a
// predicted MethodHandle target.
class PredictedDynamicCallGenerator : public CallGenerator {
  ciMethodHandle* _predicted_method_handle;
  CallGenerator*  _if_missed;
  CallGenerator*  _if_hit;
  float           _hit_prob;

public:
  PredictedDynamicCallGenerator(ciMethodHandle* predicted_method_handle,
                                CallGenerator* if_missed,
                                CallGenerator* if_hit,
                                float hit_prob)
    : CallGenerator(if_missed->method()),
      _predicted_method_handle(predicted_method_handle),
      _if_missed(if_missed),
      _if_hit(if_hit),
      _hit_prob(hit_prob)
  {}

  virtual bool is_inline()   const { return _if_hit->is_inline(); }
  virtual bool is_deferred() const { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_dynamic_call(ciMethodHandle* predicted_method_handle,
                                                         CallGenerator* if_missed,
                                                         CallGenerator* if_hit,
                                                         float hit_prob) {
  return new PredictedDynamicCallGenerator(predicted_method_handle, if_missed, if_hit, hit_prob);
}


JVMState* PredictedDynamicCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_dynamic_call bci='%d'", jvms->bci());
  }

  // Get the constant pool cache from the caller class.
  ciMethod* caller_method = jvms->method();
  ciBytecodeStream str(caller_method);
  str.force_bci(jvms->bci());  // Set the stream to the invokedynamic bci.
  ciCPCache* cpcache = str.get_cpcache();

  // Get the offset of the CallSite from the constant pool cache
  // pointer.
  int index = str.get_method_index();
  size_t call_site_offset = cpcache->get_f1_offset(index);

  // Load the CallSite object from the constant pool cache.
  const TypeOopPtr* cpcache_ptr = TypeOopPtr::make_from_constant(cpcache);
  Node* cpcache_adr = kit.makecon(cpcache_ptr);
  Node* call_site_adr = kit.basic_plus_adr(cpcache_adr, cpcache_adr, call_site_offset);
  Node* call_site = kit.make_load(kit.control(), call_site_adr, TypeInstPtr::BOTTOM, T_OBJECT, Compile::AliasIdxRaw);

  // Load the target MethodHandle from the CallSite object.
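  // (Editorial note: together with the pointer compare below, this load
  // forms an inline-cache-style guard.  If the CallSite's current target
  // is still the profiled MethodHandle, control takes the hit path, which
  // may be inlined; otherwise it falls back to the miss path.)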
  Node* target_adr = kit.basic_plus_adr(call_site, call_site, java_dyn_CallSite::target_offset_in_bytes());
  Node* target_mh = kit.make_load(kit.control(), target_adr, TypeInstPtr::BOTTOM, T_OBJECT);

  // Check if the MethodHandle is still the same.
  const TypeOopPtr* predicted_mh_ptr = TypeOopPtr::make_from_constant(_predicted_method_handle, true);
  Node* predicted_mh = kit.makecon(predicted_mh_ptr);

  Node* cmp = gvn.transform(new (kit.C, 3) CmpPNode(target_mh, predicted_mh));
  Node* bol = gvn.transform(new (kit.C, 2) BoolNode(cmp, BoolTest::eq));
  IfNode* iff = kit.create_and_xform_if(kit.control(), bol, _hit_prob, COUNT_UNKNOWN);
  kit.set_control(gvn.transform(new (kit.C, 1) IfTrueNode(iff)));
  Node* slow_ctl = gvn.transform(new (kit.C, 1) IfFalseNode(iff));

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      assert(slow_jvms != NULL, "miss path must not fail to generate");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // The target does not match the predicted MethodHandle.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Finish the diamond, as in PredictedCallGenerator::generate above.
  kit.C->set_has_split_ifs(true);  // Has chance for split-if optimization
  RegionNode* region = new (kit.C, 3) RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


//-------------------------UncommonTrapCallGenerator--------------------------
// Internal class which replaces the call site with an uncommon trap.
class UncommonTrapCallGenerator : public CallGenerator {
  Deoptimization::DeoptReason _reason;
  Deoptimization::DeoptAction _action;

public:
  UncommonTrapCallGenerator(ciMethod* m,
                            Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action)
    : CallGenerator(m)
  {
    _reason = reason;
    _action = action;
  }

  virtual bool is_virtual() const { ShouldNotReachHere(); return false; }
  virtual bool is_trap() const    { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator*
CallGenerator::for_uncommon_trap(ciMethod* m,
                                 Deoptimization::DeoptReason reason,
                                 Deoptimization::DeoptAction action) {
  return new UncommonTrapCallGenerator(m, reason, action);
}


JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
  int nargs = method()->arg_size();
  kit.inc_sp(nargs);
  assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
  if (_reason == Deoptimization::Reason_class_check &&
      _action == Deoptimization::Action_maybe_recompile) {
    // Temp fix for 6529811
    // Don't allow uncommon_trap to override our decision to recompile in the event
    // of a class cast failure for a monomorphic call as it will never let us convert
    // the call to either bi-morphic or megamorphic and can lead to unc-trap loops
    bool keep_exact_action = true;
    kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
  } else {
    kit.uncommon_trap(_reason, _action);
  }
  return kit.transfer_exceptions_into_jvms();
}

// (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)

// (Note:  Merged hook_up_exits into ParseGenerator::generate.)

#define NODES_OVERHEAD_PER_METHOD (30.0)
#define NODES_PER_BYTECODE        (9.5)

void WarmCallInfo::init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor) {
  int call_count = profile.count();
  int code_size  = call_method->code_size();

  // Expected execution count is based on the historical count:
  _count = call_count < 0 ? 1 : call_site->method()->scale_count(call_count, prof_factor);

  // Expected profit from inlining, in units of simple call-overheads.
  _profit = 1.0;

  // Expected work performed by the call in units of call-overheads.
  // %%% need an empirical curve fit for "work" (time in call)
  float bytecodes_per_call = 3;
  _work = 1.0 + code_size / bytecodes_per_call;

  // Expected size of compilation graph:
  // -XX:+PrintParseStatistics once reported:
  //   Methods seen: 9184  Methods parsed: 9184  Nodes created: 1582391
  //   Histogram of 144298 parsed bytecodes:
  // %%% Need a better predictor for graph size.
  _size = NODES_OVERHEAD_PER_METHOD + (NODES_PER_BYTECODE * code_size);
}

// is_cold:  Return true if the node should never be inlined.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_cold() const {
  if (count()  <  WarmCallMinCount)   return true;
  if (profit() <  WarmCallMinProfit)  return true;
  if (work()   >  WarmCallMaxWork)    return true;
  if (size()   >  WarmCallMaxSize)    return true;
  return false;
}

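// (Editorial worked example with hypothetical numbers: a call site whose
// scaled profile count comes out to 400, calling a 60-bytecode method, gets
//   _count = 400,  _work = 1.0 + 60/3 = 21.0,
//   _size  = 30.0 + 9.5 * 60 = 600.0,  _profit = 1.0.
// Whether that is cold, warm, or hot then depends entirely on the
// WarmCall* / HotCall* flag settings it is compared against.)
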
// is_hot:  Return true if the node should be inlined immediately.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_hot() const {
  assert(!is_cold(), "eliminate is_cold cases before testing is_hot");
  if (count()  >= HotCallCountThreshold)   return true;
  if (profit() >= HotCallProfitThreshold)  return true;
  if (work()   <= HotCallTrivialWork)      return true;
  if (size()   <= HotCallTrivialSize)      return true;
  return false;
}

// compute_heat:
float WarmCallInfo::compute_heat() const {
  assert(!is_cold(), "compute heat only on warm nodes");
  assert(!is_hot(),  "compute heat only on warm nodes");
  int min_size = MAX2(0,   (int)HotCallTrivialSize);
  int max_size = MIN2(500, (int)WarmCallMaxSize);
  float method_size = (size() - min_size) / MAX2(1, max_size - min_size);
  float size_factor;
  if      (method_size < 0.05)  size_factor = 4;    // 2 sigmas better than avg.
  else if (method_size < 0.15)  size_factor = 2;    // 1 sigma better than avg.
  else if (method_size < 0.5)   size_factor = 1;    // better than avg.
  else                          size_factor = 0.5;  // worse than avg.
  return (count() * profit() * size_factor);
}

bool WarmCallInfo::warmer_than(WarmCallInfo* that) {
  assert(this != that, "compare only different WCIs");
  assert(this->heat() != 0 && that->heat() != 0, "call compute_heat 1st");
  if (this->heat() > that->heat())  return true;
  if (this->heat() < that->heat())  return false;
  assert(this->heat() == that->heat(), "no NaN heat allowed");
  // Equal heat.  Break the tie some other way.
  if (!this->call() || !that->call())  return (address)this > (address)that;
  return this->call()->_idx > that->call()->_idx;
}

//#define UNINIT_NEXT ((WarmCallInfo*)badAddress)
#define UNINIT_NEXT ((WarmCallInfo*)NULL)

WarmCallInfo* WarmCallInfo::insert_into(WarmCallInfo* head) {
  assert(next() == UNINIT_NEXT, "not yet on any list");
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != NULL && next_p->warmer_than(this)) {
    prev_p = next_p;
    next_p = prev_p->next();
  }
  // Install this between prev_p and next_p.
  this->set_next(next_p);
  if (prev_p == NULL)
    head = this;
  else
    prev_p->set_next(this);
  return head;
}

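// (Editorial note: the warm-call queue is a singly-linked list kept sorted
// by decreasing heat.  A hypothetical usage pattern:
//
//   head = wci->insert_into(head);   // enqueue, hottest first
//   ...
//   head = wci->remove_from(head);   // dequeue once decided
//
// Both operations are O(n) list walks, which is acceptable for the short
// per-compile lists involved.)
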
WarmCallInfo* WarmCallInfo::remove_from(WarmCallInfo* head) {
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != this) {
    assert(next_p != NULL, "this must be in the list somewhere");
    prev_p = next_p;
    next_p = prev_p->next();
  }
  next_p = this->next();
  debug_only(this->set_next(UNINIT_NEXT));
  // Remove this from between prev_p and next_p.
  if (prev_p == NULL)
    head = next_p;
  else
    prev_p->set_next(next_p);
  return head;
}

WarmCallInfo* WarmCallInfo::_always_hot  = NULL;
WarmCallInfo* WarmCallInfo::_always_cold = NULL;

WarmCallInfo* WarmCallInfo::always_hot() {
  if (_always_hot == NULL) {
    static double bits[sizeof(WarmCallInfo) / sizeof(double) + 1] = {0};
    WarmCallInfo* ci = (WarmCallInfo*) bits;
    ci->_profit = ci->_count = MAX_VALUE();
    ci->_work   = ci->_size  = MIN_VALUE();
    _always_hot = ci;
  }
  assert(_always_hot->is_hot(), "must always be hot");
  return _always_hot;
}

WarmCallInfo* WarmCallInfo::always_cold() {
  if (_always_cold == NULL) {
    static double bits[sizeof(WarmCallInfo) / sizeof(double) + 1] = {0};
    WarmCallInfo* ci = (WarmCallInfo*) bits;
    ci->_profit = ci->_count = MIN_VALUE();
    ci->_work   = ci->_size  = MAX_VALUE();
    _always_cold = ci;
  }
  assert(_always_cold->is_cold(), "must always be cold");
  return _always_cold;
}


#ifndef PRODUCT

void WarmCallInfo::print() const {
  tty->print("%s : C=%6.1f P=%6.1f W=%6.1f S=%6.1f H=%6.1f -> %p",
             is_cold() ? "cold" : is_hot() ? "hot " : "warm",
             count(), profit(), work(), size(), compute_heat(), next());
  tty->cr();
  if (call() != NULL)  call()->dump();
}

void print_wci(WarmCallInfo* ci) {
  ci->print();
}

void WarmCallInfo::print_all() const {
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    p->print();
}

int WarmCallInfo::count_all() const {
  int cnt = 0;
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    cnt++;
  return cnt;
}

#endif //PRODUCT