/*
 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "opto/valuetypenode.hpp"
#include "runtime/sharedRuntime.hpp"

// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
  return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
  ciMethod* symbolic_info = caller->get_method_at_bci(bci);
  return is_inlined_method_handle_intrinsic(symbolic_info, m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* symbolic_info, ciMethod* m) {
  return symbolic_info->is_method_handle_intrinsic() && !m->is_method_handle_intrinsic();
}

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
private:
  bool  _is_osr;
  float _expected_uses;

public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
  }

  virtual bool      is_parse() const           { return true; }
  virtual JVMState* generate(JVMState* jvms);
  int is_osr() { return _is_osr; }

};

JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
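    // (The lone argument is the OSR buffer handed over by the interpreter;
    // an OSR compilation always starts a fresh JVMS and is never nested
    // under a caller, hence the depth check below.)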
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  // Grab signature for matching/allocation
#ifdef ASSERT
  if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
    assert(C->env()->system_dictionary_modification_counter_changed(),
           "Must invalidate if TypeFuncs differ");
  }
#endif

  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
 private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool                _separate_io_proj;

 public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _call_node(NULL),
      _separate_io_proj(separate_io_proj)
  {
    if (ValueTypeReturnedAsFields && method->is_method_handle_intrinsic()) {
      // If that call has not been optimized by the time optimizations are over,
      // we'll need to add a call to create a value type instance from the klass
      // returned by the call (see PhaseMacroExpand::expand_mh_intrinsic_return).
      // Separating memory and I/O projections for exceptions is required to
      // perform that graph transformation.
      _separate_io_proj = true;
    }
  }
  virtual JVMState* generate(JVMState* jvms);

  CallStaticJavaNode* call_node() const { return _call_node; }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  PhaseGVN& gvn = kit.gvn();
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode *call = new CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
    // additional information about the method being invoked should be attached
    // to the call site to make resolution logic work
    // (see SharedRuntime::resolve_static_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_intrinsic() ||
        method()->is_compiled_lambda_form()) {
      call->set_method_handle_invoke(true);
    }
  }
  kit.set_arguments_for_java_call(call, is_late_inline());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
private:
  int _vtable_index;
public:
  VirtualCallGenerator(ciMethod* method, int vtable_index)
    : CallGenerator(method), _vtable_index(vtable_index)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const          { return true; }
  virtual JVMState* generate(JVMState* jvms);
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);
  kit.C->print_inlining_update(this);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it.  The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (!receiver->is_ValueType() && kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
    ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
    int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
    kit.inc_sp(arg_size);  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod *caller = kit.method();
  ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
      ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
        >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches ");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode *call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call (optimized virtual or virtual)
    // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
    // about the method being invoked should be attached to the call site to
    // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
    call->set_override_symbolic_info(true);
  }
  kit.set_arguments_for_java_call(call);
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }
  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call.  Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller.  Thus, this CallGenerator cannot be mixed with others!
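// (The OSR entry point is reached from a loop backedge in the interpreter,
// so the incoming state describes the frame already being executed rather
// than an ordinary call site.)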
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index);
}

// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
 private:
  jlong _unique_id;   // unique id for log compilation
  bool _is_pure_call; // a hint that the call doesn't have important side effects to care about

 protected:
  CallGenerator* _inline_cg;
  virtual bool do_late_inline_check(JVMState* jvms) { return true; }

 public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg, bool is_pure_call = false) :
    DirectCallGenerator(method, true), _unique_id(0), _is_pure_call(is_pure_call), _inline_cg(inline_cg) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    // Record that this call site should be revisited once the main
    // parse is finished.
    if (!is_mh_late_inline()) {
      C->add_late_inline(this);
    }

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

  virtual void print_inlining_late(const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_assert_ready();
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
    C->print_inlining_move_to(this);
    C->print_inlining_update_delayed(this);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }
};

void LateInlineCallGenerator::do_late_inline() {
  // Can't inline it
  CallStaticJavaNode* call = call_node();
  if (call == NULL || call->outcnt() == 0 ||
      call->in(0) == NULL || call->in(0)->is_top()) {
    return;
  }

  const TypeTuple *r = call->tf()->domain_cc();
  for (int i1 = 0; i1 < method()->arg_size(); i1++) {
    if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }

  // check for unreachable loop
  CallProjections* callprojs = call->extract_projections(true);
  if (callprojs->fallthrough_catchproj == call->in(0) ||
      callprojs->catchall_catchproj == call->in(0) ||
      callprojs->fallthrough_memproj == call->in(TypeFunc::Memory) ||
      callprojs->catchall_memproj == call->in(TypeFunc::Memory) ||
      callprojs->fallthrough_ioproj == call->in(TypeFunc::I_O) ||
      callprojs->catchall_ioproj == call->in(TypeFunc::I_O) ||
      (callprojs->exobj != NULL && call->find_edge(callprojs->exobj) != -1)) {
    return;
  }
  bool result_not_used = true;
  for (uint i = 0; i < callprojs->nb_resproj; i++) {
    if (callprojs->resproj[i] != NULL) {
      if (callprojs->resproj[i]->outcnt() != 0) {
        result_not_used = false;
      }
      if (call->find_edge(callprojs->resproj[i]) != -1) {
        return;
      }
    }
  }

  Compile* C = Compile::current();
  // Remove inlined methods from Compiler's lists.
  if (call->is_macro()) {
    C->remove_macro_node(call);
  }

  if (_is_pure_call && result_not_used) {
    // The call is marked as pure (no important side effects), but result isn't used.
    // It's safe to remove the call.
    GraphKit kit(call->jvms());
    kit.replace_call(call, C->top(), true);
  } else {
    // Make a clone of the JVMState that is appropriate to use for driving a parse
    JVMState* old_jvms = call->jvms();
    JVMState* jvms = old_jvms->clone_shallow(C);
    uint size = call->req();
    SafePointNode* map = new SafePointNode(size, jvms);
    for (uint i1 = 0; i1 < size; i1++) {
      map->init_req(i1, call->in(i1));
    }

    PhaseGVN& gvn = *C->initial_gvn();
    // Make sure the state is a MergeMem for parsing.
    if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
      Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
      gvn.set_type_bottom(mem);
      map->set_req(TypeFunc::Memory, mem);
    }

    // blow away old call arguments
    Node* top = C->top();
    for (uint i1 = TypeFunc::Parms; i1 < call->_tf->domain_cc()->cnt(); i1++) {
      map->set_req(i1, top);
    }
    jvms->set_map(map);

    // Make enough space in the expression stack to transfer
    // the incoming arguments and return value.
    map->ensure_stack(jvms, jvms->method()->max_stack());
    const TypeTuple *domain_sig = call->_tf->domain_sig();
    ExtendedSignature sig_cc = ExtendedSignature(method()->get_sig_cc(), SigEntryFilter());
    uint nargs = method()->arg_size();
    assert(domain_sig->cnt() - TypeFunc::Parms == nargs, "inconsistent signature");

    uint j = TypeFunc::Parms;
    for (uint i1 = 0; i1 < nargs; i1++) {
      const Type* t = domain_sig->field_at(TypeFunc::Parms + i1);
      if (method()->has_scalarized_args() && t->is_valuetypeptr() && !t->maybe_null()) {
        // Value type arguments are not passed by reference: we get an argument per
        // field of the value type. Build ValueTypeNodes from the value type arguments.
        GraphKit arg_kit(jvms, &gvn);
        arg_kit.set_control(map->control());
        ValueTypeNode* vt = ValueTypeNode::make_from_multi(&arg_kit, call, sig_cc, t->value_klass(), j, true);
        map->set_control(arg_kit.control());
        map->set_argument(jvms, i1, vt);
      } else {
        map->set_argument(jvms, i1, call->in(j++));
        BasicType bt = t->basic_type();
        while (SigEntry::next_is_reserved(sig_cc, bt, true)) {
          j += type2size[bt]; // Skip reserved arguments
        }
      }
    }

    C->print_inlining_assert_ready();

    C->print_inlining_move_to(this);

    C->log_late_inline(this);

    // This check is done here because for_method_handle_inline() method
    // needs jvms for inlined state.
    if (!do_late_inline_check(jvms)) {
      map->disconnect_inputs(NULL, C);
      return;
    }

    // Setup default node notes to be picked up by the inlining
    Node_Notes* old_nn = C->node_notes_at(call->_idx);
    if (old_nn != NULL) {
      Node_Notes* entry_nn = old_nn->clone(C);
      entry_nn->set_jvms(jvms);
      C->set_default_node_notes(entry_nn);
    }

    // Now perform the inlining using the synthesized JVMState
    JVMState* new_jvms = _inline_cg->generate(jvms);
    if (new_jvms == NULL)  return;  // no change
    if (C->failing())      return;

    // Capture any exceptional control flow
    GraphKit kit(new_jvms);

    // Find the result object
    Node* result = C->top();
    int   result_size = method()->return_type()->size();
    if (result_size != 0 && !kit.stopped()) {
      result = (result_size == 1) ? kit.pop() : kit.pop_pair();
    }

    C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
    C->env()->notice_inlined_method(_inline_cg->method());
    C->set_inlining_progress(true);
    C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup

    // Handle value type returns
    bool returned_as_fields = call->tf()->returns_value_type_as_fields();
    if (result->is_ValueType()) {
      ValueTypeNode* vt = result->as_ValueType();
      if (returned_as_fields) {
        // Return of multiple values (the fields of a value type)
        vt->replace_call_results(&kit, call, C);
        if (vt->is_allocated(&gvn) && !StressValueTypeReturnedAsFields) {
          result = vt->get_oop();
        } else {
          result = vt->tagged_klass(gvn);
        }
      } else {
        result = ValueTypePtrNode::make_from_value_type(&kit, vt);
      }
    } else if (gvn.type(result)->is_valuetypeptr() && returned_as_fields) {
      const Type* vt_t = call->_tf->range_sig()->field_at(TypeFunc::Parms);
      Node* cast = new CheckCastPPNode(NULL, result, vt_t);
      gvn.record_for_igvn(cast);
      ValueTypePtrNode* vtptr = ValueTypePtrNode::make_from_oop(&kit, gvn.transform(cast));
      vtptr->replace_call_results(&kit, call, C);
      result = cast;
    }

    kit.replace_call(call, result, true);
  }
}


CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}

class LateInlineMHCallGenerator : public LateInlineCallGenerator {
  ciMethod* _caller;
  int _attempt;
  bool _input_not_const;

  virtual bool do_late_inline_check(JVMState* jvms);
  virtual bool already_attempted() const { return _attempt > 0; }

 public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}

  virtual bool is_mh_late_inline() const { return true; }

  virtual JVMState* generate(JVMState* jvms) {
    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);

    Compile* C = Compile::current();
    if (_input_not_const) {
      // inlining won't be possible so no need to enqueue right now.
      call_node()->set_generator(this);
    } else {
      C->add_late_inline(this);
    }
    return new_jvms;
  }
};

bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {

  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const, AlwaysIncrementalInline);

  Compile::current()->print_inlining_update_delayed(this);

  if (!_input_not_const) {
    _attempt++;
  }

  if (cg != NULL && (cg->is_inline() || cg->is_inlined_method_handle_intrinsic(jvms, cg->method()))) {
    assert(!cg->is_late_inline(), "we're doing late inlining");
    _inline_cg = cg;
    Compile::current()->dec_number_of_mh_late_inlines();
    return true;
  }

  call_node()->set_generator(this);
  return false;
}

CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  Compile::current()->inc_number_of_mh_late_inlines();
  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
  return cg;
}

class LateInlineStringCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_string_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_string_late_inline() const { return true; }
};

CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineStringCallGenerator(method, inline_cg);
}

class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_boxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }
};

CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineBoxingCallGenerator(method, inline_cg);
}

//---------------------------WarmCallGenerator--------------------------------
// Internal class which handles initial deferral of inlining decisions.
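// A warm call is emitted out-of-line first (via _if_cold); the WarmCallInfo
// records the resulting call node together with heuristic metrics so that the
// queue of warm calls can be revisited later and the hottest entries upgraded
// to the inlined version (_if_hot).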
class WarmCallGenerator : public CallGenerator {
  WarmCallInfo*   _call_info;
  CallGenerator*  _if_cold;
  CallGenerator*  _if_hot;
  bool            _is_virtual;   // caches virtuality of if_cold
  bool            _is_inline;    // caches inline-ness of if_hot

 public:
  WarmCallGenerator(WarmCallInfo* ci,
                    CallGenerator* if_cold,
                    CallGenerator* if_hot)
    : CallGenerator(if_cold->method())
  {
    assert(method() == if_hot->method(), "consistent choices");
    _call_info  = ci;
    _if_cold    = if_cold;
    _if_hot     = if_hot;
    _is_virtual = if_cold->is_virtual();
    _is_inline  = if_hot->is_inline();
  }

  virtual bool      is_inline() const           { return _is_inline; }
  virtual bool      is_virtual() const          { return _is_virtual; }
  virtual bool      is_deferred() const         { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
                                            CallGenerator* if_cold,
                                            CallGenerator* if_hot) {
  return new WarmCallGenerator(ci, if_cold, if_hot);
}

JVMState* WarmCallGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (C->log() != NULL) {
    C->log()->elem("warm_call bci='%d'", jvms->bci());
  }
  jvms = _if_cold->generate(jvms);
  if (jvms != NULL) {
    Node* m = jvms->map()->control();
    if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
    if (m->is_Catch())     m = m->in(0);  else m = C->top();
    if (m->is_Proj())      m = m->in(0);  else m = C->top();
    if (m->is_CallJava()) {
      _call_info->set_call(m->as_Call());
      _call_info->set_hot_cg(_if_hot);
#ifndef PRODUCT
      if (PrintOpto || PrintOptoInlining) {
        tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
        tty->print("WCI: ");
        _call_info->print();
      }
#endif
      _call_info->set_heat(_call_info->compute_heat());
      C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
    }
  }
  return jvms;
}

void WarmCallInfo::make_hot() {
  Unimplemented();
}

void WarmCallInfo::make_cold() {
  // No action:  Just dequeue.
}


//------------------------PredictedCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;
  bool           _exact_check;

 public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, bool exact_check,
                         float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove the extreme values from the range.
    if (hit_prob > PROB_MAX)  hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN)  hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
    _exact_check        = exact_check;
  }

  virtual bool      is_virtual()   const    { return true; }
  virtual bool      is_inline()    const    { return _if_hit->is_inline(); }
  virtual bool      is_deferred()  const    { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit,
                                    /*exact_check=*/true, hit_prob);
}

CallGenerator* CallGenerator::for_guarded_call(ciKlass* guarded_receiver,
                                               CallGenerator* if_missed,
                                               CallGenerator* if_hit) {
  return new PredictedCallGenerator(guarded_receiver, if_missed, if_hit,
                                    /*exact_check=*/false, PROB_ALWAYS);
}

JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);
  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_call bci='%d' exact='%d' klass='%d'",
              jvms->bci(), (_exact_check ? 1 : 0), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver_before_call(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  // Make a copy of the replaced nodes in case we need to restore them
  ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
  replaced_nodes.clone();

  Node* casted_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = NULL;
  if (_exact_check) {
    slow_ctl = kit.type_check_receiver(receiver, _predicted_receiver, _hit_prob,
                                       &casted_receiver);
  } else {
    slow_ctl = kit.subtype_check_receiver(receiver, _predicted_receiver,
                                          &casted_receiver);
  }

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms = NULL;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      if (kit.failing())
        return NULL;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != NULL, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // The instance does not exactly match the desired type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // fall through if the instance exactly matches the desired type
  kit.replace_in_map(receiver, casted_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Allocate value types if they are merged with objects (similar to Parse::merge_common())
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
    if (m->is_ValueType() && !t->isa_valuetype()) {
      // Allocate value type in fast path
      m = ValueTypePtrNode::make_from_value_type(&kit, m->as_ValueType());
      kit.map()->set_req(i, m);
    }
    if (n->is_ValueType() && !t->isa_valuetype()) {
      // Allocate value type in slow path
      PreserveJVMState pjvms(&kit);
      kit.set_map(slow_map);
      n = ValueTypePtrNode::make_from_value_type(&kit, n->as_ValueType());
      kit.map()->set_req(i, n);
      slow_map = kit.stop();
    }
  }

  // There are 2 branches and the replaced nodes are only valid on
  // one: restore the replaced nodes to what they were before the
  // branch.
  kit.map()->set_replaced_nodes(replaced_nodes);

  // Finish the diamond.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  // Merge memory
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  // Transform new memory Phis.
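  // (MergeMemStream visits each non-empty memory slice of the merged memory
  // state; only the Phis created for the merge region above need transforming.)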
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if( i >= limit ) break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool delayed_forbidden) {
  assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
  bool input_not_const;
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, input_not_const, false);
  Compile* C = Compile::current();
  if (cg != NULL) {
    if (!delayed_forbidden && AlwaysIncrementalInline) {
      return CallGenerator::for_late_inline(callee, cg);
    } else {
      return cg;
    }
  }
  int bci = jvms->bci();
  ciCallProfile profile = caller->call_profile_at_bci(bci);
  int call_site_count = caller->scale_count(profile.count());

  if (IncrementalInline && (AlwaysIncrementalInline ||
                            (call_site_count > 0 && (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())))) {
    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
  } else {
    // Out-of-line call.
    return CallGenerator::for_direct_call(callee);
  }
}

static void cast_argument(int nargs, int arg_nb, ciType* t, GraphKit& kit) {
  PhaseGVN& gvn = kit.gvn();
  Node* arg = kit.argument(arg_nb);
  const Type* arg_type = arg->bottom_type();
  const Type* sig_type = TypeOopPtr::make_from_klass(t->as_klass());
  if (arg_type->isa_oopptr() && !arg_type->higher_equal(sig_type)) {
    const Type* narrowed_arg_type = arg_type->join_speculative(sig_type); // keep speculative part
    arg = gvn.transform(new CheckCastPPNode(kit.control(), arg, narrowed_arg_type));
    kit.set_argument(arg_nb, arg);
  }
  if (sig_type->is_valuetypeptr() && !arg->is_ValueType() &&
      !kit.gvn().type(arg)->maybe_null() && t->as_value_klass()->is_scalarizable()) {
    arg = ValueTypeNode::make_from_oop(&kit, arg, t->as_value_klass());
    kit.set_argument(arg_nb, arg);
  }
}

CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const, bool delayed_forbidden) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
        const int vtable_index = Method::invalid_vtable_index;

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "signatures mismatch");
          return NULL;
        }

        CallGenerator* cg = C->call_generator(target, vtable_index,
                                              false /* call_does_dispatch */,
                                              jvms,
                                              true /* allow_inline */,
                                              PROB_ALWAYS,
                                              NULL,
                                              true,
                                              delayed_forbidden);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "receiver not constant");
      }
    }
    break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      int nargs = callee->arg_size();
      // Get MemberName argument:
      Node* member_name = kit.argument(nargs - 1);
      if (member_name->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "signatures mismatch");
          return NULL;
        }

        // In lambda forms we erase signature types to avoid resolving issues
        // involving class loaders.  When we optimize a method handle invoke
        // to a direct call we must cast the receiver and arguments to its
        // actual types.
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          cast_argument(nargs, 0, signature->accessing_klass(), kit);
        }
        // Cast reference arguments to their types.
        for (int i = 0, j = 0; i < signature->count(); i++) {
          ciType* t = signature->type_at(i);
          if (t->is_klass()) {
            cast_argument(nargs, receiver_skip + j, t, kit);
          }
          j += t->size();  // long and double take two slots
        }

        // Try to get the most accurate receiver type
        const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
        const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
        int  vtable_index       = Method::invalid_vtable_index;
        bool call_does_dispatch = false;

        ciKlass* speculative_receiver_type = NULL;
        if (is_virtual_or_interface) {
          ciInstanceKlass* klass = target->holder();
          Node*             receiver_node = kit.argument(0);
          const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
          // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
          // optimize_virtual_call() takes 2 different holder
          // arguments for a corner case that doesn't apply here (see
          // Parse::do_call())
          target = C->optimize_virtual_call(caller, jvms->bci(), klass, klass,
                                            target, receiver_type, is_virtual,
                                            call_does_dispatch, vtable_index,  // out-parameters
                                            false /* check_access */);
          // We lack profiling at this call but type speculation may
          // provide us with a type
          speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
        }
        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
                                              !StressMethodHandleLinkerInlining /* allow_inline */,
                                              PROB_ALWAYS,
                                              speculative_receiver_type,
                                              true,
                                              delayed_forbidden);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "member_name not constant");
      }
    }
    break;

  default:
    fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
    break;
  }
  return NULL;
}


//------------------------PredicatedIntrinsicGenerator------------------------------
// Internal class which handles all predicated Intrinsic calls.
class PredicatedIntrinsicGenerator : public CallGenerator {
  CallGenerator* _intrinsic;
  CallGenerator* _cg;

 public:
  PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
                               CallGenerator* cg)
    : CallGenerator(cg->method())
  {
    _intrinsic = intrinsic;
    _cg        = cg;
  }

  virtual bool      is_virtual()   const    { return true; }
  virtual bool      is_inlined()   const    { return true; }
  virtual bool      is_intrinsic() const    { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
                                                       CallGenerator* cg) {
  return new PredicatedIntrinsicGenerator(intrinsic, cg);
}


JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
  // The code we want to generate here is:
  //    if (receiver == NULL)
  //      uncommon_Trap
  //    if (predicate(0))
  //      do_intrinsic(0)
  //    else
  //    if (predicate(1))
  //      do_intrinsic(1)
  //    ...
  //    else
  //      do_java_comp

  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicated_intrinsic bci='%d' method='%d'",
              jvms->bci(), log->identify(method()));
  }

  if (!method()->is_static()) {
    // We need an explicit receiver null_check before checking its type in predicate.
    // We share a map with the caller, so his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      return kit.transfer_exceptions_into_jvms();
    }
  }

  int n_predicates = _intrinsic->predicates_count();
  assert(n_predicates > 0, "sanity");

  JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));

  // Region for normal compilation code if intrinsic failed.
  Node* slow_region = new RegionNode(1);

  int results = 0;
  for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
#ifdef ASSERT
    JVMState* old_jvms = kit.jvms();
    SafePointNode* old_map = kit.map();
    Node* old_io  = old_map->i_o();
    Node* old_mem = old_map->memory();
    Node* old_exc = old_map->next_exception();
#endif
    Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
#ifdef ASSERT
    // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
    assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
    SafePointNode* new_map = kit.map();
    assert(old_io == new_map->i_o(), "generate_predicate should not change i_o");
    assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
    assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
#endif
    if (!kit.stopped()) {
      PreserveJVMState pjvms(&kit);
      // Generate intrinsic code:
      JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
      if (new_jvms == NULL) {
        // Intrinsic failed, use normal compilation path for this predicate.
        slow_region->add_req(kit.control());
      } else {
        kit.add_exception_states_from(new_jvms);
        kit.set_jvms(new_jvms);
        if (!kit.stopped()) {
          result_jvms[results++] = kit.jvms();
        }
      }
    }
    if (else_ctrl == NULL) {
      else_ctrl = kit.C->top();
    }
    kit.set_control(else_ctrl);
  }
  if (!kit.stopped()) {
    // Final 'else' after predicates.
    slow_region->add_req(kit.control());
  }
  if (slow_region->req() > 1) {
    PreserveJVMState pjvms(&kit);
    // Generate normal compilation code:
    kit.set_control(gvn.transform(slow_region));
    JVMState* new_jvms = _cg->generate(kit.sync_jvms());
    if (kit.failing())
      return NULL;  // might happen because of NodeCountInliningCutoff
    assert(new_jvms != NULL, "must be");
    kit.add_exception_states_from(new_jvms);
    kit.set_jvms(new_jvms);
    if (!kit.stopped()) {
      result_jvms[results++] = kit.jvms();
    }
  }

  if (results == 0) {
    // All paths ended in uncommon traps.
    (void) kit.stop();
    return kit.transfer_exceptions_into_jvms();
  }

  if (results == 1) { // Only one path
    kit.set_jvms(result_jvms[0]);
    return kit.transfer_exceptions_into_jvms();
  }

  // Merge all paths.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(results + 1);
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  for (int i = 0; i < results; i++) {
    JVMState* jvms = result_jvms[i];
    int path = i + 1;
    SafePointNode* map = jvms->map();
    region->init_req(path, map->control());
    iophi->set_req(path, map->i_o());
    if (i == 0) {
      kit.set_jvms(jvms);
    } else {
      kit.merge_memory(map->merged_memory(), region, path);
    }
  }
  kit.set_control(gvn.transform(region));
  kit.set_i_o(gvn.transform(iophi));
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }

  // Merge debug info.
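  // Walk the JVMS slots (locals, expression stack, monitors) and insert a Phi
  // on 'region' wherever the surviving paths disagree; slots that agree on all
  // paths are left untouched.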
  Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  Node* map = kit.map();
  uint limit = map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if( i >= limit ) break;
    }
    Node* n = map->in(i);
    ins[0] = n;
    const Type* t = gvn.type(n);
    bool needs_phi = false;
    for (int j = 1; j < results; j++) {
      JVMState* jvms = result_jvms[j];
      Node* jmap = jvms->map();
      Node* m = NULL;
      if (jmap->req() > i) {
        m = jmap->in(i);
        if (m != n) {
          needs_phi = true;
          t = t->meet_speculative(gvn.type(m));
        }
      }
      ins[j] = m;
    }
    if (needs_phi) {
      Node* phi = PhiNode::make(region, n, t);
      for (int j = 1; j < results; j++) {
        phi->set_req(j + 1, ins[j]);
      }
      map->set_req(i, gvn.transform(phi));
    }
  }

  return kit.transfer_exceptions_into_jvms();
}

//-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which handles call sites that unconditionally deoptimize
// via an uncommon trap.
class UncommonTrapCallGenerator : public CallGenerator {
  Deoptimization::DeoptReason _reason;
  Deoptimization::DeoptAction _action;

 public:
  UncommonTrapCallGenerator(ciMethod* m,
                            Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action)
    : CallGenerator(m)
  {
    _reason = reason;
    _action = action;
  }

  virtual bool      is_virtual() const          { ShouldNotReachHere(); return false; }
  virtual bool      is_trap() const             { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator*
CallGenerator::for_uncommon_trap(ciMethod* m,
                                 Deoptimization::DeoptReason reason,
                                 Deoptimization::DeoptAction action) {
  return new UncommonTrapCallGenerator(m, reason, action);
}


JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
  // Callsite signature can be different from actual method being called (i.e _linkTo* sites).
  // Use callsite signature always.
  ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
  int nargs = declared_method->arg_size();
  kit.inc_sp(nargs);
  assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
  if (_reason == Deoptimization::Reason_class_check &&
      _action == Deoptimization::Action_maybe_recompile) {
    // Temp fix for 6529811
    // Don't allow uncommon_trap to override our decision to recompile in the event
    // of a class cast failure for a monomorphic call as it will never let us convert
    // the call to either bi-morphic or megamorphic and can lead to unc-trap loops
    bool keep_exact_action = true;
    kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
  } else {
    kit.uncommon_trap(_reason, _action);
  }
  return kit.transfer_exceptions_into_jvms();
}

// (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)

// (Note:  Merged hook_up_exits into ParseGenerator::generate.)
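
// Rough cost model for WarmCallInfo::init() below: a parsed method is charged
// a fixed node overhead plus a per-bytecode node estimate (constants derived
// from -XX:+PrintParseStatistics output, see the comments in init()).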

#define NODES_OVERHEAD_PER_METHOD (30.0)
#define NODES_PER_BYTECODE (9.5)

void WarmCallInfo::init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor) {
  int call_count = profile.count();
  int code_size = call_method->code_size();

  // Expected execution count is based on the historical count:
  _count = call_count < 0 ? 1 : call_site->method()->scale_count(call_count, prof_factor);

  // Expected profit from inlining, in units of simple call-overheads.
  _profit = 1.0;

  // Expected work performed by the call in units of call-overheads.
  // %%% need an empirical curve fit for "work" (time in call)
  float bytecodes_per_call = 3;
  _work = 1.0 + code_size / bytecodes_per_call;

  // Expected size of compilation graph:
  // -XX:+PrintParseStatistics once reported:
  //  Methods seen: 9184  Methods parsed: 9184  Nodes created: 1582391
  //  Histogram of 144298 parsed bytecodes:
  // %%% Need a better predictor for graph size.
  _size = NODES_OVERHEAD_PER_METHOD + (NODES_PER_BYTECODE * code_size);
}

// is_cold:  Return true if the node should never be inlined.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_cold() const {
  if (count()  <  WarmCallMinCount)        return true;
  if (profit() <  WarmCallMinProfit)       return true;
  if (work()   >  WarmCallMaxWork)         return true;
  if (size()   >  WarmCallMaxSize)         return true;
  return false;
}

// is_hot:  Return true if the node should be inlined immediately.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_hot() const {
  assert(!is_cold(), "eliminate is_cold cases before testing is_hot");
  if (count()  >= HotCallCountThreshold)   return true;
  if (profit() >= HotCallProfitThreshold)  return true;
  if (work()   <= HotCallTrivialWork)      return true;
  if (size()   <= HotCallTrivialSize)      return true;
  return false;
}

// compute_heat:
float WarmCallInfo::compute_heat() const {
  assert(!is_cold(), "compute heat only on warm nodes");
  assert(!is_hot(),  "compute heat only on warm nodes");
  int min_size = MAX2(0,   (int)HotCallTrivialSize);
  int max_size = MIN2(500, (int)WarmCallMaxSize);
  float method_size = (size() - min_size) / MAX2(1, max_size - min_size);
  float size_factor;
  if      (method_size < 0.05)  size_factor = 4;   // 2 sigmas better than avg.
  else if (method_size < 0.15)  size_factor = 2;   // 1 sigma better than avg.
  else if (method_size < 0.5)   size_factor = 1;   // better than avg.
  else                          size_factor = 0.5; // worse than avg.
  return (count() * profit() * size_factor);
}

bool WarmCallInfo::warmer_than(WarmCallInfo* that) {
  assert(this != that, "compare only different WCIs");
  assert(this->heat() != 0 && that->heat() != 0, "call compute_heat 1st");
  if (this->heat() > that->heat())   return true;
  if (this->heat() < that->heat())   return false;
  assert(this->heat() == that->heat(), "no NaN heat allowed");
  // Equal heat.  Break the tie some other way.
  if (!this->call() || !that->call())  return (address)this > (address)that;
  return this->call()->_idx > that->call()->_idx;
}

//#define UNINIT_NEXT ((WarmCallInfo*)badAddress)
#define UNINIT_NEXT ((WarmCallInfo*)NULL)

WarmCallInfo* WarmCallInfo::insert_into(WarmCallInfo* head) {
  assert(next() == UNINIT_NEXT, "not yet on any list");
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != NULL && next_p->warmer_than(this)) {
    prev_p = next_p;
    next_p = prev_p->next();
  }
  // Install this between prev_p and next_p.
  this->set_next(next_p);
  if (prev_p == NULL)
    head = this;
  else
    prev_p->set_next(this);
  return head;
}

WarmCallInfo* WarmCallInfo::remove_from(WarmCallInfo* head) {
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != this) {
    assert(next_p != NULL, "this must be in the list somewhere");
    prev_p = next_p;
    next_p = prev_p->next();
  }
  next_p = this->next();
  debug_only(this->set_next(UNINIT_NEXT));
  // Remove this from between prev_p and next_p.
  if (prev_p == NULL)
    head = next_p;
  else
    prev_p->set_next(next_p);
  return head;
}

WarmCallInfo WarmCallInfo::_always_hot(WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE(),
                                       WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE());
WarmCallInfo WarmCallInfo::_always_cold(WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE(),
                                        WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE());

WarmCallInfo* WarmCallInfo::always_hot() {
  assert(_always_hot.is_hot(), "must always be hot");
  return &_always_hot;
}

WarmCallInfo* WarmCallInfo::always_cold() {
  assert(_always_cold.is_cold(), "must always be cold");
  return &_always_cold;
}


#ifndef PRODUCT

void WarmCallInfo::print() const {
  tty->print("%s : C=%6.1f P=%6.1f W=%6.1f S=%6.1f H=%6.1f -> %p",
             is_cold() ? "cold" : is_hot() ? "hot " : "warm",
             count(), profit(), work(), size(), compute_heat(), next());
  tty->cr();
  if (call() != NULL)  call()->dump();
}

void print_wci(WarmCallInfo* ci) {
  ci->print();
}

void WarmCallInfo::print_all() const {
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    p->print();
}

int WarmCallInfo::count_all() const {
  int cnt = 0;
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    cnt++;
  return cnt;
}

#endif //PRODUCT