/*
 * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciCPCache.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"

CallGenerator::CallGenerator(ciMethod* method) {
  _method = method;
}

// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
private:
  bool  _is_osr;
  float _expected_uses;

public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(can_parse(method, is_osr), "parse must be possible");
  }

  // Can we build either an OSR or a regular parser for this method?
  static bool can_parse(ciMethod* method, int is_osr = false);

  virtual bool      is_parse() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
  int is_osr() { return _is_osr; }

};

JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  // Grab signature for matching/allocation
#ifdef ASSERT
  if (parser.tf() != (parser.depth() == 1 ?
                      C->tf() : tf())) {
    MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
    assert(C->env()->system_dictionary_modification_counter_changed(),
           "Must invalidate if TypeFuncs differ");
  }
#endif

  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
 private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool                _separate_io_proj;

 public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _separate_io_proj(separate_io_proj)
  {
  }
  virtual JVMState* generate(JVMState* jvms);

  CallStaticJavaNode* call_node() const { return _call_node; }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode *call = new (kit.C, tf()->domain()->cnt()) CallStaticJavaNode(tf(), target, method(), kit.bci());
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_invoke()) {
      call->set_method_handle_invoke(true);
    }
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  _call_node = call;  // Save the call node in case we need it later
  return kit.transfer_exceptions_into_jvms();
}

//---------------------------DynamicCallGenerator-----------------------------
// Internal class which handles all out-of-line invokedynamic calls.
class DynamicCallGenerator : public CallGenerator {
public:
  DynamicCallGenerator(ciMethod* method)
    : CallGenerator(method)
  {
  }
  virtual JVMState* generate(JVMState* jvms);
};

JVMState* DynamicCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("dynamic_call bci='%d'", jvms->bci());
  }

  // Get the constant pool cache from the caller class.
  ciMethod* caller_method = jvms->method();
  ciBytecodeStream str(caller_method);
  str.force_bci(jvms->bci());  // Set the stream to the invokedynamic bci.
  assert(str.cur_bc() == Bytecodes::_invokedynamic, "wrong place to issue a dynamic call!");
  ciCPCache* cpcache = str.get_cpcache();

  // Get the offset of the CallSite from the constant pool cache
  // pointer.
  int index = str.get_method_index();
  size_t call_site_offset = cpcache->get_f1_offset(index);

  // Load the CallSite object from the constant pool cache.
  const TypeOopPtr* cpcache_ptr = TypeOopPtr::make_from_constant(cpcache);
  Node* cpcache_adr = kit.makecon(cpcache_ptr);
  Node* call_site_adr = kit.basic_plus_adr(cpcache_adr, cpcache_adr, call_site_offset);
  Node* call_site = kit.make_load(kit.control(), call_site_adr, TypeInstPtr::BOTTOM, T_OBJECT, Compile::AliasIdxRaw);

  // Load the target MethodHandle from the CallSite object.
  Node* target_mh_adr = kit.basic_plus_adr(call_site, call_site, java_lang_invoke_CallSite::target_offset_in_bytes());
  Node* target_mh = kit.make_load(kit.control(), target_mh_adr, TypeInstPtr::BOTTOM, T_OBJECT);

  address resolve_stub = SharedRuntime::get_resolve_opt_virtual_call_stub();

  CallStaticJavaNode *call = new (kit.C, tf()->domain()->cnt()) CallStaticJavaNode(tf(), resolve_stub, method(), kit.bci());
  // invokedynamic is treated as an optimized invokevirtual.
  call->set_optimized_virtual(true);
  // Take extra care (in the presence of argument motion) not to trash the SP:
  call->set_method_handle_invoke(true);

  // Pass the target MethodHandle as first argument and shift the
  // other arguments.
  call->init_req(0 + TypeFunc::Parms, target_mh);
  uint nargs = call->method()->arg_size();
  for (uint i = 1; i < nargs; i++) {
    Node* arg = kit.argument(i - 1);
    call->init_req(i + TypeFunc::Parms, arg);
  }

  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
private:
  int _vtable_index;
public:
  VirtualCallGenerator(ciMethod* method, int vtable_index)
    : CallGenerator(method), _vtable_index(vtable_index)
  {
    assert(vtable_index == methodOopDesc::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it.  The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    kit.inc_sp(method()->arg_size());  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However, currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod *caller = kit.method();
  ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks ||
      ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
        >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == methodOopDesc::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches ");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode *call = new (kit.C, tf()->domain()->cnt()) CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call.  Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

bool ParseGenerator::can_parse(ciMethod* m, int entry_bci) {
  // Certain methods cannot be parsed at all:
  if (!m->can_be_compiled())              return false;
  if (!m->has_balanced_monitors())        return false;
  if (m->get_flow_analysis()->failing())  return false;

  // (Methods may bail out for other reasons, after the parser is run.
  // We try to avoid this, but if forced, we must return (Node*)NULL.
  // The user of the CallGenerator must check for this condition.)
  return true;
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (!ParseGenerator::can_parse(m))  return NULL;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller.  Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (!ParseGenerator::can_parse(m, true))  return NULL;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_dynamic_call(ciMethod* m) {
  assert(m->is_method_handle_invoke() || m->is_method_handle_adapter(), "for_dynamic_call mismatch");
  return new DynamicCallGenerator(m);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_invoke(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index);
}

// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
  CallGenerator* _inline_cg;

 public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    DirectCallGenerator(method, true), _inline_cg(inline_cg) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  JVMState* generate(JVMState* jvms) {
    // Record that this call site should be revisited once the main
    // parse is finished.
    Compile::current()->add_late_inline(this);

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

};


void LateInlineCallGenerator::do_late_inline() {
  // Can't inline it
  if (call_node() == NULL || call_node()->outcnt() == 0 ||
      call_node()->in(0) == NULL || call_node()->in(0)->is_top())
    return;

  CallStaticJavaNode* call = call_node();

  // Make a clone of the JVMState that is appropriate to use for driving a parse
  Compile* C = Compile::current();
  JVMState* jvms = call->jvms()->clone_shallow(C);
  uint size = call->req();
  SafePointNode* map = new (C, size) SafePointNode(size, jvms);
  for (uint i1 = 0; i1 < size; i1++) {
    map->init_req(i1, call->in(i1));
  }

  // Make sure the state is a MergeMem for parsing.
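  // (The parser consumes memory one alias slice at a time, so the map's
  // memory input must be a MergeMem that can be split into slices; a
  // flat memory edge from the call is wrapped in a fresh MergeMem below.)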
  if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
    map->set_req(TypeFunc::Memory, MergeMemNode::make(C, map->in(TypeFunc::Memory)));
  }

  // Make enough space for the expression stack and transfer the incoming arguments
  int nargs = method()->arg_size();
  jvms->set_map(map);
  map->ensure_stack(jvms, jvms->method()->max_stack());
  if (nargs > 0) {
    for (int i1 = 0; i1 < nargs; i1++) {
      map->set_req(i1 + jvms->argoff(), call->in(TypeFunc::Parms + i1));
    }
  }

  CompileLog* log = C->log();
  if (log != NULL) {
    log->head("late_inline method='%d'", log->identify(method()));
    JVMState* p = jvms;
    while (p != NULL) {
      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
      p = p->caller();
    }
    log->tail("late_inline");
  }

  // Set up default node notes to be picked up by the inlining
  Node_Notes* old_nn = C->default_node_notes();
  if (old_nn != NULL) {
    Node_Notes* entry_nn = old_nn->clone(C);
    entry_nn->set_jvms(jvms);
    C->set_default_node_notes(entry_nn);
  }

  // Now perform the inlining using the synthesized JVMState
  JVMState* new_jvms = _inline_cg->generate(jvms);
  if (new_jvms == NULL)  return;  // no change
  if (C->failing())      return;

  // Capture any exceptional control flow
  GraphKit kit(new_jvms);

  // Find the result object
  Node* result = C->top();
  int result_size = method()->return_type()->size();
  if (result_size != 0 && !kit.stopped()) {
    result = (result_size == 1) ? kit.pop() : kit.pop_pair();
  }

  kit.replace_call(call, result);
}


CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}


//---------------------------WarmCallGenerator--------------------------------
// Internal class which handles initial deferral of inlining decisions.
class WarmCallGenerator : public CallGenerator {
  WarmCallInfo*   _call_info;
  CallGenerator*  _if_cold;
  CallGenerator*  _if_hot;
  bool            _is_virtual;   // caches virtuality of if_cold
  bool            _is_inline;    // caches inline-ness of if_hot

public:
  WarmCallGenerator(WarmCallInfo* ci,
                    CallGenerator* if_cold,
                    CallGenerator* if_hot)
    : CallGenerator(if_cold->method())
  {
    assert(method() == if_hot->method(), "consistent choices");
    _call_info  = ci;
    _if_cold    = if_cold;
    _if_hot     = if_hot;
    _is_virtual = if_cold->is_virtual();
    _is_inline  = if_hot->is_inline();
  }

  virtual bool      is_inline() const   { return _is_inline; }
  virtual bool      is_virtual() const  { return _is_virtual; }
  virtual bool      is_deferred() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
                                            CallGenerator* if_cold,
                                            CallGenerator* if_hot) {
  return new WarmCallGenerator(ci, if_cold, if_hot);
}

JVMState* WarmCallGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  if (C->log() != NULL) {
    C->log()->elem("warm_call bci='%d'", jvms->bci());
  }
  jvms = _if_cold->generate(jvms);
  if (jvms != NULL) {
    Node* m = jvms->map()->control();
    if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
    if (m->is_Catch())     m = m->in(0);  else m = C->top();
    if (m->is_Proj())      m = m->in(0);  else m = C->top();
    if (m->is_CallJava()) {
      _call_info->set_call(m->as_Call());
      _call_info->set_hot_cg(_if_hot);
#ifndef PRODUCT
      if (PrintOpto || PrintOptoInlining) {
        tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
        tty->print("WCI: ");
        _call_info->print();
      }
#endif
      _call_info->set_heat(_call_info->compute_heat());
      C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
    }
  }
  return jvms;
}

void WarmCallInfo::make_hot() {
  Unimplemented();
}

void WarmCallInfo::make_cold() {
  // No action:  Just dequeue.
}


//------------------------PredictedCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;

public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove the extreme values from the range.
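    // (A probability of exactly 0 or 1 would claim that the miss or hit
    // path can never be taken, which the profile cannot guarantee;
    // clamping to [PROB_MIN, PROB_MAX] keeps both arms of the guard live.)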
    if (hit_prob > PROB_MAX)  hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN)  hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
  }

  virtual bool      is_virtual()  const { return true; }
  virtual bool      is_inline()   const { return _if_hit->is_inline(); }
  virtual bool      is_deferred() const { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit, hit_prob);
}


JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_call bci='%d' klass='%d'",
              jvms->bci(), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  Node* exact_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = kit.type_check_receiver(receiver,
                                           _predicted_receiver, _hit_prob,
                                           &exact_receiver);

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      assert(slow_jvms != NULL, "miss path must not fail to generate");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // The instance can never exactly match the desired type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // fall through if the instance exactly matches the desired type
  kit.replace_in_map(receiver, exact_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Finish the diamond.
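  // (Join the control of the hit and miss paths in a Region, merge their
  // i_o and memory states through Phis, and make a Phi for every stack
  // or local slot where the two maps disagree.)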
  kit.C->set_has_split_ifs(true);  // Has chance for split-if optimization
  RegionNode* region = new (kit.C, 3) RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if( i >= limit ) break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


//------------------------PredictedDynamicCallGenerator-----------------------
// Internal class which handles all out-of-line calls checking the
// target of a MethodHandle call site.
class PredictedDynamicCallGenerator : public CallGenerator {
  ciMethodHandle* _predicted_method_handle;
  CallGenerator*  _if_missed;
  CallGenerator*  _if_hit;
  float           _hit_prob;

public:
  PredictedDynamicCallGenerator(ciMethodHandle* predicted_method_handle,
                                CallGenerator* if_missed,
                                CallGenerator* if_hit,
                                float hit_prob)
    : CallGenerator(if_missed->method()),
      _predicted_method_handle(predicted_method_handle),
      _if_missed(if_missed),
      _if_hit(if_hit),
      _hit_prob(hit_prob)
  {}

  virtual bool is_inline()   const { return _if_hit->is_inline(); }
  virtual bool is_deferred() const { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_dynamic_call(ciMethodHandle* predicted_method_handle,
                                                         CallGenerator* if_missed,
                                                         CallGenerator* if_hit,
                                                         float hit_prob) {
  return new PredictedDynamicCallGenerator(predicted_method_handle, if_missed, if_hit, hit_prob);
}


CallGenerator* CallGenerator::for_method_handle_inline(Node* method_handle, JVMState* jvms,
                                                       ciMethod* caller, ciMethod* callee, ciCallProfile profile) {
  if (method_handle->Opcode() == Op_ConP) {
    const TypeOopPtr* oop_ptr = method_handle->bottom_type()->is_oopptr();
    ciObject* const_oop = oop_ptr->const_oop();
    ciMethodHandle* method_handle = const_oop->as_method_handle();

    // Set the callee to have access to the class and signature in
    // the MethodHandleCompiler.
    method_handle->set_callee(callee);
    method_handle->set_caller(caller);
    method_handle->set_call_profile(profile);

    // Get an adapter for the MethodHandle.
    ciMethod* target_method = method_handle->get_method_handle_adapter();
    if (target_method != NULL) {
      CallGenerator* cg = Compile::current()->call_generator(target_method, -1, false, jvms, true, PROB_ALWAYS);
      if (cg != NULL && cg->is_inline())
        return cg;
    }
  } else if (method_handle->Opcode() == Op_Phi && method_handle->req() == 3 &&
             method_handle->in(1)->Opcode() == Op_ConP && method_handle->in(2)->Opcode() == Op_ConP) {
    float prob = PROB_FAIR;
    Node* meth_region = method_handle->in(0);
    if (meth_region->is_Region() &&
        meth_region->in(1)->is_Proj() && meth_region->in(2)->is_Proj() &&
        meth_region->in(1)->in(0) == meth_region->in(2)->in(0) &&
        meth_region->in(1)->in(0)->is_If()) {
      // It's a diamond, so grab the probability of the test to drive the inlining below
      prob = meth_region->in(1)->in(0)->as_If()->_prob;
      if (meth_region->in(1)->is_IfTrue()) {
        prob = 1 - prob;
      }
    }

    // selectAlternative idiom merging two constant MethodHandles.
    // Generate a guard so that each can be inlined.  We might want to
    // do more inputs at a later point but this gets the most common
    // case.
    CallGenerator* cg1 = for_method_handle_inline(method_handle->in(1), jvms, caller, callee, profile.rescale(1.0 - prob));
    CallGenerator* cg2 = for_method_handle_inline(method_handle->in(2), jvms, caller, callee, profile.rescale(prob));
    if (cg1 != NULL && cg2 != NULL) {
      const TypeOopPtr* oop_ptr = method_handle->in(1)->bottom_type()->is_oopptr();
      ciObject* const_oop = oop_ptr->const_oop();
      ciMethodHandle* mh = const_oop->as_method_handle();
      return new PredictedDynamicCallGenerator(mh, cg2, cg1, prob);
    }
  }
  return NULL;
}


CallGenerator* CallGenerator::for_invokedynamic_inline(ciCallSite* call_site, JVMState* jvms,
                                                       ciMethod* caller, ciMethod* callee, ciCallProfile profile) {
  assert(call_site->is_constant_call_site() || call_site->is_mutable_call_site(), "must be");
  ciMethodHandle* method_handle = call_site->get_target();

  // Set the callee to have access to the class and signature in the
  // MethodHandleCompiler.
  method_handle->set_callee(callee);
  method_handle->set_caller(caller);
  method_handle->set_call_profile(profile);

  // Get an adapter for the MethodHandle.
  ciMethod* target_method = method_handle->get_invokedynamic_adapter();
  if (target_method != NULL) {
    Compile *C = Compile::current();
    CallGenerator* cg = C->call_generator(target_method, -1, false, jvms, true, PROB_ALWAYS);
    if (cg != NULL && cg->is_inline()) {
      // Add a dependence for invalidation of the optimization.
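      // (A MutableCallSite's target may change after compilation, so the
      // inlined target is only valid while the CallSite still points at
      // it; the dependency below invalidates this compiled method if the
      // target is updated.  A ConstantCallSite needs no such dependency.)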
      if (call_site->is_mutable_call_site()) {
        C->dependencies()->assert_call_site_target_value(call_site, method_handle);
      }
      return cg;
    }
  }
  return NULL;
}


JVMState* PredictedDynamicCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_dynamic_call bci='%d'", jvms->bci());
  }

  const TypeOopPtr* predicted_mh_ptr = TypeOopPtr::make_from_constant(_predicted_method_handle, true);
  Node* predicted_mh = kit.makecon(predicted_mh_ptr);

  Node* bol = NULL;
  int bc = jvms->method()->java_code_at_bci(jvms->bci());
  if (bc == Bytecodes::_invokespecial) {
    // This is the selectAlternative idiom for guardWithTest
    Node* receiver = kit.argument(0);

    // Check if the MethodHandle is the expected one
    Node* cmp = gvn.transform(new(kit.C, 3) CmpPNode(receiver, predicted_mh));
    bol = gvn.transform(new(kit.C, 2) BoolNode(cmp, BoolTest::eq) );
  } else {
    assert(bc == Bytecodes::_invokedynamic, "must be");
    // Get the constant pool cache from the caller class.
    ciMethod* caller_method = jvms->method();
    ciBytecodeStream str(caller_method);
    str.force_bci(jvms->bci());  // Set the stream to the invokedynamic bci.
    ciCPCache* cpcache = str.get_cpcache();

    // Get the offset of the CallSite from the constant pool cache
    // pointer.
    int index = str.get_method_index();
    size_t call_site_offset = cpcache->get_f1_offset(index);

    // Load the CallSite object from the constant pool cache.
    const TypeOopPtr* cpcache_ptr = TypeOopPtr::make_from_constant(cpcache);
    Node* cpcache_adr = kit.makecon(cpcache_ptr);
    Node* call_site_adr = kit.basic_plus_adr(cpcache_adr, cpcache_adr, call_site_offset);
    Node* call_site = kit.make_load(kit.control(), call_site_adr, TypeInstPtr::BOTTOM, T_OBJECT, Compile::AliasIdxRaw);

    // Load the target MethodHandle from the CallSite object.
    Node* target_adr = kit.basic_plus_adr(call_site, call_site, java_lang_invoke_CallSite::target_offset_in_bytes());
    Node* target_mh = kit.make_load(kit.control(), target_adr, TypeInstPtr::BOTTOM, T_OBJECT);

    // Check if the MethodHandle is still the same.
    Node* cmp = gvn.transform(new(kit.C, 3) CmpPNode(target_mh, predicted_mh));
    bol = gvn.transform(new(kit.C, 2) BoolNode(cmp, BoolTest::eq) );
  }
  IfNode* iff = kit.create_and_xform_if(kit.control(), bol, _hit_prob, COUNT_UNKNOWN);
  kit.set_control( gvn.transform(new(kit.C, 1) IfTrueNode (iff)));
  Node* slow_ctl = gvn.transform(new(kit.C, 1) IfFalseNode(iff));

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      assert(slow_jvms != NULL, "miss path must not fail to generate");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // The MethodHandle can never match the predicted one.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Finish the diamond.
  kit.C->set_has_split_ifs(true);  // Has chance for split-if optimization
  RegionNode* region = new (kit.C, 3) RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if( i >= limit ) break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


//-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which replaces the call site with an unconditional
// uncommon trap.
class UncommonTrapCallGenerator : public CallGenerator {
  Deoptimization::DeoptReason _reason;
  Deoptimization::DeoptAction _action;

public:
  UncommonTrapCallGenerator(ciMethod* m,
                            Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action)
    : CallGenerator(m)
  {
    _reason = reason;
    _action = action;
  }

  virtual bool is_virtual() const { ShouldNotReachHere(); return false; }
  virtual bool is_trap() const    { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator*
CallGenerator::for_uncommon_trap(ciMethod* m,
                                 Deoptimization::DeoptReason reason,
                                 Deoptimization::DeoptAction action) {
  return new UncommonTrapCallGenerator(m, reason, action);
}


JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
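  // (The trap's JVMS must show the operand stack as it was just before
  // the invoke, so the interpreter can re-execute the call bytecode
  // after deoptimization; hence the arguments are re-pushed here.)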
  int nargs = method()->arg_size();
  kit.inc_sp(nargs);
  assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
  if (_reason == Deoptimization::Reason_class_check &&
      _action == Deoptimization::Action_maybe_recompile) {
    // Temp fix for 6529811
    // Don't allow uncommon_trap to override our decision to recompile in the event
    // of a class cast failure for a monomorphic call as it will never let us convert
    // the call to either bi-morphic or megamorphic and can lead to unc-trap loops
    bool keep_exact_action = true;
    kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
  } else {
    kit.uncommon_trap(_reason, _action);
  }
  return kit.transfer_exceptions_into_jvms();
}

// (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)

// (Note:  Merged hook_up_exits into ParseGenerator::generate.)

#define NODES_OVERHEAD_PER_METHOD  (30.0)
#define NODES_PER_BYTECODE         (9.5)

void WarmCallInfo::init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor) {
  int call_count = profile.count();
  int code_size = call_method->code_size();

  // Expected execution count is based on the historical count:
  _count = call_count < 0 ? 1 : call_site->method()->scale_count(call_count, prof_factor);

  // Expected profit from inlining, in units of simple call-overheads.
  _profit = 1.0;

  // Expected work performed by the call in units of call-overheads.
  // %%% need an empirical curve fit for "work" (time in call)
  float bytecodes_per_call = 3;
  _work = 1.0 + code_size / bytecodes_per_call;

  // Expected size of compilation graph:
  // -XX:+PrintParseStatistics once reported:
  //  Methods seen: 9184  Methods parsed: 9184  Nodes created: 1582391
  //  Histogram of 144298 parsed bytecodes:
  // %%% Need a better predictor for graph size.
  _size = NODES_OVERHEAD_PER_METHOD + (NODES_PER_BYTECODE * code_size);
}

// is_cold:  Return true if the node should never be inlined.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_cold() const {
  if (count()  < WarmCallMinCount)   return true;
  if (profit() < WarmCallMinProfit)  return true;
  if (work()   > WarmCallMaxWork)    return true;
  if (size()   > WarmCallMaxSize)    return true;
  return false;
}

// is_hot:  Return true if the node should be inlined immediately.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_hot() const {
  assert(!is_cold(), "eliminate is_cold cases before testing is_hot");
  if (count()  >= HotCallCountThreshold)   return true;
  if (profit() >= HotCallProfitThreshold)  return true;
  if (work()   <= HotCallTrivialWork)      return true;
  if (size()   <= HotCallTrivialSize)      return true;
  return false;
}

// compute_heat:
float WarmCallInfo::compute_heat() const {
  assert(!is_cold(), "compute heat only on warm nodes");
  assert(!is_hot(),  "compute heat only on warm nodes");
  int min_size = MAX2(0,   (int)HotCallTrivialSize);
  int max_size = MIN2(500, (int)WarmCallMaxSize);
  float method_size = (size() - min_size) / MAX2(1, max_size - min_size);
  float size_factor;
  if      (method_size < 0.05)  size_factor = 4;    // 2 sigmas better than avg.
  else if (method_size < 0.15)  size_factor = 2;    // 1 sigma better than avg.
  else if (method_size < 0.5)   size_factor = 1;    // better than avg.
  else                          size_factor = 0.5;  // worse than avg.
  return (count() * profit() * size_factor);
}

bool WarmCallInfo::warmer_than(WarmCallInfo* that) {
  assert(this != that, "compare only different WCIs");
  assert(this->heat() != 0 && that->heat() != 0, "call compute_heat 1st");
  if (this->heat() > that->heat())  return true;
  if (this->heat() < that->heat())  return false;
  assert(this->heat() == that->heat(), "no NaN heat allowed");
  // Equal heat.  Break the tie some other way.
  if (!this->call() || !that->call())  return (address)this > (address)that;
  return this->call()->_idx > that->call()->_idx;
}

//#define UNINIT_NEXT ((WarmCallInfo*)badAddress)
#define UNINIT_NEXT ((WarmCallInfo*)NULL)

WarmCallInfo* WarmCallInfo::insert_into(WarmCallInfo* head) {
  assert(next() == UNINIT_NEXT, "not yet on any list");
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != NULL && next_p->warmer_than(this)) {
    prev_p = next_p;
    next_p = prev_p->next();
  }
  // Install this between prev_p and next_p.
  this->set_next(next_p);
  if (prev_p == NULL)
    head = this;
  else
    prev_p->set_next(this);
  return head;
}

WarmCallInfo* WarmCallInfo::remove_from(WarmCallInfo* head) {
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != this) {
    assert(next_p != NULL, "this must be in the list somewhere");
    prev_p = next_p;
    next_p = prev_p->next();
  }
  next_p = this->next();
  debug_only(this->set_next(UNINIT_NEXT));
  // Remove this from between prev_p and next_p.
  if (prev_p == NULL)
    head = next_p;
  else
    prev_p->set_next(next_p);
  return head;
}

WarmCallInfo WarmCallInfo::_always_hot(WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE(),
                                       WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE());
WarmCallInfo WarmCallInfo::_always_cold(WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE(),
                                        WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE());

WarmCallInfo* WarmCallInfo::always_hot() {
  assert(_always_hot.is_hot(), "must always be hot");
  return &_always_hot;
}

WarmCallInfo* WarmCallInfo::always_cold() {
  assert(_always_cold.is_cold(), "must always be cold");
  return &_always_cold;
}


#ifndef PRODUCT

void WarmCallInfo::print() const {
  tty->print("%s : C=%6.1f P=%6.1f W=%6.1f S=%6.1f H=%6.1f -> %p",
             is_cold() ? "cold" : is_hot() ? "hot " : "warm",
             count(), profit(), work(), size(), compute_heat(), next());
  tty->cr();
  if (call() != NULL)  call()->dump();
}

void print_wci(WarmCallInfo* ci) {
  ci->print();
}

void WarmCallInfo::print_all() const {
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    p->print();
}

int WarmCallInfo::count_all() const {
  int cnt = 0;
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    cnt++;
  return cnt;
}

#endif //PRODUCT