/*
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/sharedRuntime.hpp"

// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
  return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
  ciMethod* symbolic_info = caller->get_method_at_bci(bci);
  return is_inlined_method_handle_intrinsic(symbolic_info, m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* symbolic_info, ciMethod* m) {
  return symbolic_info->is_method_handle_intrinsic() && !m->is_method_handle_intrinsic();
}

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
private:
  bool  _is_osr;
  float _expected_uses;

public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
  }

  virtual bool      is_parse() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
  int is_osr() { return _is_osr; }

};

JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
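    // (An OSR compilation replaces a frame that is already running in the
    // interpreter, so it is always the root of its compile and is never
    // reached via inlining; hence the depth-1 requirement below.)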
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  // Grab signature for matching/allocation
#ifdef ASSERT
  if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
    MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
    assert(C->env()->system_dictionary_modification_counter_changed(),
           "Must invalidate if TypeFuncs differ");
  }
#endif

  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
 private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool                _separate_io_proj;

 public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _call_node(NULL),
      _separate_io_proj(separate_io_proj)
  {
  }
  virtual JVMState* generate(JVMState* jvms);

  CallStaticJavaNode* call_node() const { return _call_node; }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode* call = new CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
    // additional information about the method being invoked should be attached
    // to the call site to make resolution logic work
    // (see SharedRuntime::resolve_static_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
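    // (Note that null_check_receiver_before_call also replaces the receiver
    // in the shared map with its null-checked cast on the non-null path,
    // so the caller's state sees the narrowed type as well.)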
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_intrinsic() ||
        method()->is_compiled_lambda_form()) {
      call->set_method_handle_invoke(true);
    }
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
 private:
  int _vtable_index;
 public:
  VirtualCallGenerator(ciMethod* method, int vtable_index)
    : CallGenerator(method), _vtable_index(vtable_index)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);

  kit.C->print_inlining_update(this);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it.  The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
    ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
    int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
    kit.inc_sp(arg_size);  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod*     caller    = kit.method();
  ciMethodData* caller_md = (caller == NULL) ? NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
      ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
        >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode* call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call (optimized virtual or virtual)
    // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
    // about the method being invoked should be attached to the call site to
    // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
    call->set_override_symbolic_info(true);
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call.  Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller.  Thus, this CallGenerator cannot be mixed with others!
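// (It is only used for the root of an on-stack-replacement compile;
// see the depth-1 assert in ParseGenerator::generate above.)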
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index);
}

// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
 private:
  // unique id for log compilation
  jlong _unique_id;

 protected:
  CallGenerator* _inline_cg;
  virtual bool do_late_inline_check(JVMState* jvms) { return true; }

 public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    DirectCallGenerator(method, true), _unique_id(0), _inline_cg(inline_cg) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    // Record that this call site should be revisited once the main
    // parse is finished.
    if (!is_mh_late_inline()) {
      C->add_late_inline(this);
    }

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
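    // (Keeping the projections separate is what later lets do_late_inline()
    // splice the parsed body in over the call via GraphKit::replace_call
    // without disturbing the exceptional users.)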
    return DirectCallGenerator::generate(jvms);
  }

  virtual void print_inlining_late(const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_assert_ready();
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
    C->print_inlining_move_to(this);
    C->print_inlining_update_delayed(this);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }
};

void LateInlineCallGenerator::do_late_inline() {
  // Can't inline it
  CallStaticJavaNode* call = call_node();
  if (call == NULL || call->outcnt() == 0 ||
      call->in(0) == NULL || call->in(0)->is_top()) {
    return;
  }

  const TypeTuple* r = call->tf()->domain();
  for (int i1 = 0; i1 < method()->arg_size(); i1++) {
    if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }

  // check for unreachable loop
  CallProjections callprojs;
  call->extract_projections(&callprojs, true);
  if (callprojs.fallthrough_catchproj == call->in(0) ||
      callprojs.catchall_catchproj == call->in(0) ||
      callprojs.fallthrough_memproj == call->in(TypeFunc::Memory) ||
      callprojs.catchall_memproj == call->in(TypeFunc::Memory) ||
      callprojs.fallthrough_ioproj == call->in(TypeFunc::I_O) ||
      callprojs.catchall_ioproj == call->in(TypeFunc::I_O) ||
      (callprojs.resproj != NULL && call->find_edge(callprojs.resproj) != -1) ||
      (callprojs.exobj != NULL && call->find_edge(callprojs.exobj) != -1)) {
    return;
  }

  Compile* C = Compile::current();
  // Remove inlined methods from Compiler's lists.
  if (call->is_macro()) {
    C->remove_macro_node(call);
  }

  // Make a clone of the JVMState that is appropriate for driving a parse
  JVMState* old_jvms = call->jvms();
  JVMState* jvms = old_jvms->clone_shallow(C);
  uint size = call->req();
  SafePointNode* map = new SafePointNode(size, jvms);
  for (uint i1 = 0; i1 < size; i1++) {
    map->init_req(i1, call->in(i1));
  }

  // Make sure the state is a MergeMem for parsing.
  if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
    Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
    C->initial_gvn()->set_type_bottom(mem);
    map->set_req(TypeFunc::Memory, mem);
  }

  uint nargs = method()->arg_size();
  // blow away old call arguments
  Node* top = C->top();
  for (uint i1 = 0; i1 < nargs; i1++) {
    map->set_req(TypeFunc::Parms + i1, top);
  }
  jvms->set_map(map);

  // Make enough space in the expression stack to transfer
  // the incoming arguments and return value.
  map->ensure_stack(jvms, jvms->method()->max_stack());
  for (uint i1 = 0; i1 < nargs; i1++) {
    map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
  }

  C->print_inlining_assert_ready();

  C->print_inlining_move_to(this);

  C->log_late_inline(this);

  // This check is done here because the for_method_handle_inline() method
  // needs jvms for the inlined state.
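  // (The base implementation of do_late_inline_check trivially succeeds;
  // the method-handle variant below re-runs for_method_handle_inline here,
  // once more of the call's inputs may have become constant.)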
  if (!do_late_inline_check(jvms)) {
    map->disconnect_inputs(NULL, C);
    return;
  }

  // Set up default node notes to be picked up by the inlining
  Node_Notes* old_nn = C->node_notes_at(call->_idx);
  if (old_nn != NULL) {
    Node_Notes* entry_nn = old_nn->clone(C);
    entry_nn->set_jvms(jvms);
    C->set_default_node_notes(entry_nn);
  }

  // Now perform the inlining using the synthesized JVMState
  JVMState* new_jvms = _inline_cg->generate(jvms);
  if (new_jvms == NULL)  return;  // no change
  if (C->failing())      return;

  // Capture any exceptional control flow
  GraphKit kit(new_jvms);

  // Find the result object
  Node* result = C->top();
  int result_size = method()->return_type()->size();
  if (result_size != 0 && !kit.stopped()) {
    result = (result_size == 1) ? kit.pop() : kit.pop_pair();
  }

  C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
  C->env()->notice_inlined_method(_inline_cg->method());
  C->set_inlining_progress(true);

  kit.replace_call(call, result, true);
}


CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}

class LateInlineMHCallGenerator : public LateInlineCallGenerator {
  ciMethod* _caller;
  int       _attempt;
  bool      _input_not_const;

  virtual bool do_late_inline_check(JVMState* jvms);
  virtual bool already_attempted() const { return _attempt > 0; }

 public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}

  virtual bool is_mh_late_inline() const { return true; }

  virtual JVMState* generate(JVMState* jvms) {
    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);

    Compile* C = Compile::current();
    if (_input_not_const) {
      // inlining won't be possible so no need to enqueue right now.
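      // (Instead, leave a link from the call node back to this generator,
      // so the site can still be found and retried later if its input
      // turns out to be constant after all.)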
      call_node()->set_generator(this);
    } else {
      C->add_late_inline(this);
    }
    return new_jvms;
  }
};

bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {

  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);

  Compile::current()->print_inlining_update_delayed(this);

  if (!_input_not_const) {
    _attempt++;
  }

  if (cg != NULL && cg->is_inline()) {
    assert(!cg->is_late_inline(), "we're doing late inlining");
    _inline_cg = cg;
    Compile::current()->dec_number_of_mh_late_inlines();
    return true;
  }

  call_node()->set_generator(this);
  return false;
}

CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  Compile::current()->inc_number_of_mh_late_inlines();
  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
  return cg;
}

class LateInlineStringCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    C->add_string_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_string_late_inline() const { return true; }
};

CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineStringCallGenerator(method, inline_cg);
}

class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    C->add_boxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }
};

CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineBoxingCallGenerator(method, inline_cg);
}

//---------------------------WarmCallGenerator--------------------------------
// Internal class which handles initial deferral of inlining decisions.
class WarmCallGenerator : public CallGenerator {
  WarmCallInfo*  _call_info;
  CallGenerator* _if_cold;
  CallGenerator* _if_hot;
  bool           _is_virtual;  // caches virtuality of if_cold
  bool           _is_inline;   // caches inline-ness of if_hot

 public:
  WarmCallGenerator(WarmCallInfo* ci,
                    CallGenerator* if_cold,
                    CallGenerator* if_hot)
    : CallGenerator(if_cold->method())
  {
    assert(method() == if_hot->method(), "consistent choices");
    _call_info  = ci;
    _if_cold    = if_cold;
    _if_hot     = if_hot;
    _is_virtual = if_cold->is_virtual();
    _is_inline  = if_hot->is_inline();
  }

  virtual bool      is_inline() const   { return _is_inline; }
  virtual bool      is_virtual() const  { return _is_virtual; }
  virtual bool      is_deferred() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
                                            CallGenerator* if_cold,
                                            CallGenerator* if_hot) {
  return new WarmCallGenerator(ci, if_cold, if_hot);
}

JVMState* WarmCallGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (C->log() != NULL) {
    C->log()->elem("warm_call bci='%d'", jvms->bci());
  }
  jvms = _if_cold->generate(jvms);
  if (jvms != NULL) {
    Node* m = jvms->map()->control();
    if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
    if (m->is_Catch())     m = m->in(0);  else m = C->top();
    if (m->is_Proj())      m = m->in(0);  else m = C->top();
    if (m->is_CallJava()) {
      _call_info->set_call(m->as_Call());
      _call_info->set_hot_cg(_if_hot);
#ifndef PRODUCT
      if (PrintOpto || PrintOptoInlining) {
        tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
        tty->print("WCI: ");
        _call_info->print();
      }
#endif
      _call_info->set_heat(_call_info->compute_heat());
      C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
    }
  }
  return jvms;
}

void WarmCallInfo::make_hot() {
  Unimplemented();
}

void WarmCallInfo::make_cold() {
  // No action:  Just dequeue.
}


//------------------------PredictedCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;

 public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove the extreme values from the range.
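    // (PROB_MAX/PROB_MIN are the parser's canonical almost-always and
    // almost-never branch probabilities; clamping keeps both branches of
    // the receiver type check alive in the frequency calculations.)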
    if (hit_prob > PROB_MAX)  hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN)  hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
  }

  virtual bool is_virtual()  const { return true; }
  virtual bool is_inline()   const { return _if_hit->is_inline(); }
  virtual bool is_deferred() const { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit, hit_prob);
}


JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);
  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_call bci='%d' klass='%d'",
              jvms->bci(), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver_before_call(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  // Make a copy of the replaced nodes in case we need to restore them
  ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
  replaced_nodes.clone();

  Node* exact_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = kit.type_check_receiver(receiver,
                                           _predicted_receiver, _hit_prob,
                                           &exact_receiver);

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms = NULL;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      if (kit.failing())
        return NULL;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != NULL, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // The instance does not exactly match the predicted type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // fall through if the instance exactly matches the desired type
  kit.replace_in_map(receiver, exact_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // There are 2 branches and the replaced nodes are only valid on
  // one: restore the replaced nodes to what they were before the
  // branch.
  kit.map()->set_replaced_nodes(replaced_nodes);

  // Finish the diamond.
  kit.C->set_has_split_ifs(true);  // Has chance for split-if optimization
  RegionNode* region = new RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  // Merge memory
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool delayed_forbidden) {
  assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
  bool input_not_const;
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, input_not_const);
  Compile* C = Compile::current();
  if (cg != NULL) {
    if (!delayed_forbidden && AlwaysIncrementalInline) {
      return CallGenerator::for_late_inline(callee, cg);
    } else {
      return cg;
    }
  }
  int bci = jvms->bci();
  ciCallProfile profile = caller->call_profile_at_bci(bci);
  int call_site_count = caller->scale_count(profile.count());

  if (IncrementalInline && call_site_count > 0 &&
      (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
  } else {
    // Out-of-line call.
    return CallGenerator::for_direct_call(callee);
  }
}

CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
        const int vtable_index = Method::invalid_vtable_index;

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "signatures mismatch");
          return NULL;
        }

        CallGenerator* cg = C->call_generator(target, vtable_index,
                                              false /* call_does_dispatch */,
                                              jvms,
                                              true /* allow_inline */,
                                              PROB_ALWAYS);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "receiver not constant");
      }
    }
    break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      // Get MemberName argument:
      Node* member_name = kit.argument(callee->arg_size() - 1);
      if (member_name->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "signatures mismatch");
          return NULL;
        }

        // In lambda forms we erase signature types to avoid resolving issues
        // involving class loaders.  When we optimize a method handle invoke
        // to a direct call we must cast the receiver and arguments to their
        // actual types.
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          Node* arg = kit.argument(0);
          const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
          const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
          if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
            Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, sig_type));
            kit.set_argument(0, cast_obj);
          }
        }
        // Cast reference arguments to their types.
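        // (Here i indexes signature entries while j tracks JVM argument
        // slots; the two diverge because longs and doubles occupy two
        // slots each, hence the j += t->size() at the end of the loop.)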
        for (int i = 0, j = 0; i < signature->count(); i++) {
          ciType* t = signature->type_at(i);
          if (t->is_klass()) {
            Node* arg = kit.argument(receiver_skip + j);
            const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
            const Type*       sig_type = TypeOopPtr::make_from_klass(t->as_klass());
            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
              Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, sig_type));
              kit.set_argument(receiver_skip + j, cast_obj);
            }
          }
          j += t->size();  // long and double take two slots
        }

        // Try to get the most accurate receiver type
        const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
        const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
        int  vtable_index       = Method::invalid_vtable_index;
        bool call_does_dispatch = false;

        ciKlass* speculative_receiver_type = NULL;
        if (is_virtual_or_interface) {
          ciInstanceKlass* klass = target->holder();
          Node*            receiver_node = kit.argument(0);
          const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
          // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
          // optimize_virtual_call() takes 2 different holder
          // arguments for a corner case that doesn't apply here (see
          // Parse::do_call())
          target = C->optimize_virtual_call(caller, jvms->bci(), klass, klass,
                                            target, receiver_type, is_virtual,
                                            call_does_dispatch, vtable_index,  // out-parameters
                                            false /* check_access */);
          // We lack profiling at this call but type speculation may
          // provide us with a type
          speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
        }
        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
                                              !StressMethodHandleLinkerInlining /* allow_inline */,
                                              PROB_ALWAYS,
                                              speculative_receiver_type);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "member_name not constant");
      }
    }
    break;

  default:
    fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
    break;
  }
  return NULL;
}


//------------------------PredicatedIntrinsicGenerator------------------------------
// Internal class which handles all predicated Intrinsic calls.
class PredicatedIntrinsicGenerator : public CallGenerator {
  CallGenerator* _intrinsic;
  CallGenerator* _cg;

 public:
  PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
                               CallGenerator* cg)
    : CallGenerator(cg->method())
  {
    _intrinsic = intrinsic;
    _cg        = cg;
  }

  virtual bool is_virtual()   const { return true; }
  virtual bool is_inlined()   const { return true; }
  virtual bool is_intrinsic() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
                                                       CallGenerator* cg) {
  return new PredicatedIntrinsicGenerator(intrinsic, cg);
}


JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
  // The code we want to generate here is:
  //    if (receiver == NULL)
  //      uncommon_trap
  //    if (predicate(0))
  //      do_intrinsic(0)
  //    else
  //      if (predicate(1))
  //        do_intrinsic(1)
  //    ...
  //    else
  //      do_java_comp

  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicated_intrinsic bci='%d' method='%d'",
              jvms->bci(), log->identify(method()));
  }

  if (!method()->is_static()) {
    // We need an explicit receiver null_check before checking its type in predicate.
    // We share a map with the caller, so his JVMS gets adjusted.
    Node* receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      return kit.transfer_exceptions_into_jvms();
    }
  }

  int n_predicates = _intrinsic->predicates_count();
  assert(n_predicates > 0, "sanity");

  JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));

  // Region for normal compilation code if intrinsic failed.
  Node* slow_region = new RegionNode(1);

  int results = 0;
  for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
#ifdef ASSERT
    JVMState* old_jvms = kit.jvms();
    SafePointNode* old_map = kit.map();
    Node* old_io  = old_map->i_o();
    Node* old_mem = old_map->memory();
    Node* old_exc = old_map->next_exception();
#endif
    Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
#ifdef ASSERT
    // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
    assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
    SafePointNode* new_map = kit.map();
    assert(old_io  == new_map->i_o(), "generate_predicate should not change i_o");
    assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
    assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
#endif
    if (!kit.stopped()) {
      PreserveJVMState pjvms(&kit);
      // Generate intrinsic code:
      JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
      if (new_jvms == NULL) {
        // Intrinsic failed, use normal compilation path for this predicate.
        slow_region->add_req(kit.control());
      } else {
        kit.add_exception_states_from(new_jvms);
        kit.set_jvms(new_jvms);
        if (!kit.stopped()) {
          result_jvms[results++] = kit.jvms();
        }
      }
    }
    if (else_ctrl == NULL) {
      else_ctrl = kit.C->top();
    }
    kit.set_control(else_ctrl);
  }
  if (!kit.stopped()) {
    // Final 'else' after predicates.
    slow_region->add_req(kit.control());
  }
  if (slow_region->req() > 1) {
    PreserveJVMState pjvms(&kit);
    // Generate normal compilation code:
    kit.set_control(gvn.transform(slow_region));
    JVMState* new_jvms = _cg->generate(kit.sync_jvms());
    if (kit.failing())
      return NULL;  // might happen because of NodeCountInliningCutoff
    assert(new_jvms != NULL, "must be");
    kit.add_exception_states_from(new_jvms);
    kit.set_jvms(new_jvms);
    if (!kit.stopped()) {
      result_jvms[results++] = kit.jvms();
    }
  }

  if (results == 0) {
    // All paths ended in uncommon traps.
    (void) kit.stop();
    return kit.transfer_exceptions_into_jvms();
  }

  if (results == 1) { // Only one path
    kit.set_jvms(result_jvms[0]);
    return kit.transfer_exceptions_into_jvms();
  }

  // Merge all paths.
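  // (Build one Region input per surviving path, with matching Phis for
  // i/o, memory, and any debug-info slots that differ between paths.)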
  kit.C->set_has_split_ifs(true);  // Has chance for split-if optimization
  RegionNode* region = new RegionNode(results + 1);
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  for (int i = 0; i < results; i++) {
    JVMState* jvms = result_jvms[i];
    int path = i + 1;
    SafePointNode* map = jvms->map();
    region->init_req(path, map->control());
    iophi->set_req(path, map->i_o());
    if (i == 0) {
      kit.set_jvms(jvms);
    } else {
      kit.merge_memory(map->merged_memory(), region, path);
    }
  }
  kit.set_control(gvn.transform(region));
  kit.set_i_o(gvn.transform(iophi));
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }

  // Merge debug info.
  Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  Node* map = kit.map();
  uint limit = map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* n = map->in(i);
    ins[0] = n;
    const Type* t = gvn.type(n);
    bool needs_phi = false;
    for (int j = 1; j < results; j++) {
      JVMState* jvms = result_jvms[j];
      Node* jmap = jvms->map();
      Node* m = NULL;
      if (jmap->req() > i) {
        m = jmap->in(i);
        if (m != n) {
          needs_phi = true;
          t = t->meet_speculative(gvn.type(m));
        }
      }
      ins[j] = m;
    }
    if (needs_phi) {
      Node* phi = PhiNode::make(region, n, t);
      for (int j = 1; j < results; j++) {
        phi->set_req(j + 1, ins[j]);
      }
      map->set_req(i, gvn.transform(phi));
    }
  }

  return kit.transfer_exceptions_into_jvms();
}

//-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which replaces the call site with an unconditional uncommon trap.
class UncommonTrapCallGenerator : public CallGenerator {
  Deoptimization::DeoptReason _reason;
  Deoptimization::DeoptAction _action;

 public:
  UncommonTrapCallGenerator(ciMethod* m,
                            Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action)
    : CallGenerator(m)
  {
    _reason = reason;
    _action = action;
  }

  virtual bool is_virtual() const { ShouldNotReachHere(); return false; }
  virtual bool is_trap() const    { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator*
CallGenerator::for_uncommon_trap(ciMethod* m,
                                 Deoptimization::DeoptReason reason,
                                 Deoptimization::DeoptAction action) {
  return new UncommonTrapCallGenerator(m, reason, action);
}


JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
  // The callsite signature can differ from that of the actual method being called
  // (e.g., _linkTo* sites).  Always use the callsite signature.
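  // (For the _linkTo* linkers, for instance, the callsite signature carries
  // a trailing MemberName argument which the target method's own signature
  // lacks, so the two arg_size values can differ.)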
  ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
  int nargs = declared_method->arg_size();
  kit.inc_sp(nargs);
  assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
  if (_reason == Deoptimization::Reason_class_check &&
      _action == Deoptimization::Action_maybe_recompile) {
    // Temp fix for 6529811
    // Don't allow uncommon_trap to override our decision to recompile in the event
    // of a class cast failure for a monomorphic call as it will never let us convert
    // the call to either bi-morphic or megamorphic and can lead to unc-trap loops
    bool keep_exact_action = true;
    kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
  } else {
    kit.uncommon_trap(_reason, _action);
  }
  return kit.transfer_exceptions_into_jvms();
}

// (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)

// (Note:  Merged hook_up_exits into ParseGenerator::generate.)

#define NODES_OVERHEAD_PER_METHOD (30.0)
#define NODES_PER_BYTECODE (9.5)

void WarmCallInfo::init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor) {
  int call_count = profile.count();
  int code_size = call_method->code_size();

  // Expected execution count is based on the historical count:
  _count = call_count < 0 ? 1 : call_site->method()->scale_count(call_count, prof_factor);

  // Expected profit from inlining, in units of simple call-overheads.
  _profit = 1.0;

  // Expected work performed by the call in units of call-overheads.
  // %%% need an empirical curve fit for "work" (time in call)
  float bytecodes_per_call = 3;
  _work = 1.0 + code_size / bytecodes_per_call;

  // Expected size of compilation graph:
  // -XX:+PrintParseStatistics once reported:
  //   Methods seen: 9184  Methods parsed: 9184  Nodes created: 1582391
  //   Histogram of 144298 parsed bytecodes:
  // %%% Need a better predictor for graph size.
  _size = NODES_OVERHEAD_PER_METHOD + (NODES_PER_BYTECODE * code_size);
}

// is_cold:  Return true if the node should never be inlined.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_cold() const {
  if (count()  <  WarmCallMinCount)   return true;
  if (profit() <  WarmCallMinProfit)  return true;
  if (work()   >  WarmCallMaxWork)    return true;
  if (size()   >  WarmCallMaxSize)    return true;
  return false;
}

// is_hot:  Return true if the node should be inlined immediately.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_hot() const {
  assert(!is_cold(), "eliminate is_cold cases before testing is_hot");
  if (count()  >= HotCallCountThreshold)   return true;
  if (profit() >= HotCallProfitThreshold)  return true;
  if (work()   <= HotCallTrivialWork)      return true;
  if (size()   <= HotCallTrivialSize)      return true;
  return false;
}

// compute_heat:
float WarmCallInfo::compute_heat() const {
  assert(!is_cold(), "compute heat only on warm nodes");
  assert(!is_hot(),  "compute heat only on warm nodes");
  int min_size = MAX2(0,   (int)HotCallTrivialSize);
  int max_size = MIN2(500, (int)WarmCallMaxSize);
  float method_size = (size() - min_size) / MAX2(1, max_size - min_size);
  float size_factor;
  if      (method_size < 0.05)  size_factor = 4;    // 2 sigmas better than avg.
  else if (method_size < 0.15)  size_factor = 2;    // 1 sigma better than avg.
  else if (method_size < 0.5)   size_factor = 1;    // better than avg.
  else                          size_factor = 0.5;  // worse than avg.
  return (count() * profit() * size_factor);
}

bool WarmCallInfo::warmer_than(WarmCallInfo* that) {
  assert(this != that, "compare only different WCIs");
  assert(this->heat() != 0 && that->heat() != 0, "call compute_heat 1st");
  if (this->heat() > that->heat())   return true;
  if (this->heat() < that->heat())   return false;
  assert(this->heat() == that->heat(), "no NaN heat allowed");
  // Equal heat.  Break the tie some other way.
  if (!this->call() || !that->call())  return (address)this > (address)that;
  return this->call()->_idx > that->call()->_idx;
}

//#define UNINIT_NEXT ((WarmCallInfo*)badAddress)
#define UNINIT_NEXT ((WarmCallInfo*)NULL)

WarmCallInfo* WarmCallInfo::insert_into(WarmCallInfo* head) {
  assert(next() == UNINIT_NEXT, "not yet on any list");
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != NULL && next_p->warmer_than(this)) {
    prev_p = next_p;
    next_p = prev_p->next();
  }
  // Install this between prev_p and next_p.
  this->set_next(next_p);
  if (prev_p == NULL)
    head = this;
  else
    prev_p->set_next(this);
  return head;
}

WarmCallInfo* WarmCallInfo::remove_from(WarmCallInfo* head) {
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != this) {
    assert(next_p != NULL, "this must be in the list somewhere");
    prev_p = next_p;
    next_p = prev_p->next();
  }
  next_p = this->next();
  debug_only(this->set_next(UNINIT_NEXT));
  // Remove this from between prev_p and next_p.
  if (prev_p == NULL)
    head = next_p;
  else
    prev_p->set_next(next_p);
  return head;
}

WarmCallInfo WarmCallInfo::_always_hot(WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE(),
                                       WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE());
WarmCallInfo WarmCallInfo::_always_cold(WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE(),
                                        WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE());

WarmCallInfo* WarmCallInfo::always_hot() {
  assert(_always_hot.is_hot(), "must always be hot");
  return &_always_hot;
}

WarmCallInfo* WarmCallInfo::always_cold() {
  assert(_always_cold.is_cold(), "must always be cold");
  return &_always_cold;
}


#ifndef PRODUCT

void WarmCallInfo::print() const {
  tty->print("%s : C=%6.1f P=%6.1f W=%6.1f S=%6.1f H=%6.1f -> %p",
             is_cold() ? "cold" : is_hot() ? "hot " : "warm",
             count(), profit(), work(), size(), compute_heat(), next());
  tty->cr();
  if (call() != NULL)  call()->dump();
}

void print_wci(WarmCallInfo* ci) {
  ci->print();
}

void WarmCallInfo::print_all() const {
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    p->print();
}

int WarmCallInfo::count_all() const {
  int cnt = 0;
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    cnt++;
  return cnt;
}

#endif //PRODUCT