/*
 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "opto/valuetypenode.hpp"
#include "runtime/sharedRuntime.hpp"

// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
  return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
  ciMethod* symbolic_info = caller->get_method_at_bci(bci);
  return is_inlined_method_handle_intrinsic(symbolic_info, m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* symbolic_info, ciMethod* m) {
  return symbolic_info->is_method_handle_intrinsic() && !m->is_method_handle_intrinsic();
}

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
 private:
  bool  _is_osr;
  float _expected_uses;

 public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
  }

  virtual bool      is_parse() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
  int is_osr() { return _is_osr; }

};

JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  // Grab signature for matching/allocation
  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
 private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool                _separate_io_proj;

 public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _call_node(NULL),
      _separate_io_proj(separate_io_proj)
  {
    if (ValueTypeReturnedAsFields && method->is_method_handle_intrinsic()) {
      // If that call has not been optimized by the time optimizations are over,
      // we'll need to add a call to create a value type instance from the klass
      // returned by the call (see PhaseMacroExpand::expand_mh_intrinsic_return).
      // Separating memory and I/O projections for exceptions is required to
      // perform that graph transformation.
      _separate_io_proj = true;
    }
  }
  virtual JVMState* generate(JVMState* jvms);

  CallStaticJavaNode* call_node() const { return _call_node; }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  PhaseGVN& gvn = kit.gvn();
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode *call = new CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
    // additional information about the method being invoked should be attached
    // to the call site to make resolution logic work
    // (see SharedRuntime::resolve_static_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_intrinsic() ||
        method()->is_compiled_lambda_form()) {
      call->set_method_handle_invoke(true);
    }
  }
  kit.set_arguments_for_java_call(call, is_late_inline());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
 private:
  int _vtable_index;
 public:
  VirtualCallGenerator(ciMethod* method, int vtable_index)
    : CallGenerator(method), _vtable_index(vtable_index)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);
  kit.C->print_inlining_update(this);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it.  The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (!receiver->is_ValueType() && kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
    ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
    int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
    kit.inc_sp(arg_size);  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod *caller = kit.method();
  ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
       ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
       >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches ");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode *call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call (optimized virtual or virtual)
    // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
    // about the method being invoked should be attached to the call site to
    // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
    call->set_override_symbolic_info(true);
  }
  kit.set_arguments_for_java_call(call);
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }
  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call.  Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller.  Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index);
}

// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
 private:
  jlong _unique_id;   // unique id for log compilation
  bool _is_pure_call; // a hint that the call doesn't have important side effects to care about

 protected:
  CallGenerator* _inline_cg;
  virtual bool do_late_inline_check(JVMState* jvms) { return true; }

 public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg, bool is_pure_call = false) :
    DirectCallGenerator(method, true), _unique_id(0), _is_pure_call(is_pure_call), _inline_cg(inline_cg) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    // Record that this call site should be revisited once the main
    // parse is finished.
    if (!is_mh_late_inline()) {
      C->add_late_inline(this);
    }

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

  virtual void print_inlining_late(const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_assert_ready();
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
    C->print_inlining_move_to(this);
    C->print_inlining_update_delayed(this);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }
};

void LateInlineCallGenerator::do_late_inline() {
  // Can't inline it
  CallStaticJavaNode* call = call_node();
  if (call == NULL || call->outcnt() == 0 ||
      call->in(0) == NULL || call->in(0)->is_top()) {
    return;
  }

  const GrowableArray<SigEntry>* sig_cc = method()->get_sig_cc();
  const TypeTuple* r = call->tf()->domain_cc();
  for (uint i1 = TypeFunc::Parms, i2 = 0; i1 < r->cnt(); i1++) {
    if (sig_cc != NULL) {
      // Skip reserved entries
      while (!SigEntry::skip_value_delimiters(sig_cc, i2)) {
        i2++;
      }
      if (SigEntry::is_reserved_entry(sig_cc, i2++)) {
        assert(call->in(i1)->is_top(), "should be top");
        continue;
      }
    }
    if (call->in(i1)->is_top() && r->field_at(i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }

  // check for unreachable loop
  CallProjections* callprojs = call->extract_projections(true);
  if (callprojs->fallthrough_catchproj == call->in(0) ||
      callprojs->catchall_catchproj == call->in(0) ||
      callprojs->fallthrough_memproj == call->in(TypeFunc::Memory) ||
      callprojs->catchall_memproj == call->in(TypeFunc::Memory) ||
      callprojs->fallthrough_ioproj == call->in(TypeFunc::I_O) ||
      callprojs->catchall_ioproj == call->in(TypeFunc::I_O) ||
      (callprojs->exobj != NULL && call->find_edge(callprojs->exobj) != -1)) {
    return;
  }
  bool result_not_used = true;
  for (uint i = 0; i < callprojs->nb_resproj; i++) {
    if (callprojs->resproj[i] != NULL) {
      if (callprojs->resproj[i]->outcnt() != 0) {
        result_not_used = false;
      }
      if (call->find_edge(callprojs->resproj[i]) != -1) {
        return;
      }
    }
  }

  Compile* C = Compile::current();
  // Remove inlined methods from Compiler's lists.
  if (call->is_macro()) {
    C->remove_macro_node(call);
  }

  if (_is_pure_call && result_not_used) {
    // The call is marked as pure (no important side effects), but the result isn't used.
    // It's safe to remove the call.
    GraphKit kit(call->jvms());
    kit.replace_call(call, C->top(), true);
  } else {
    // Make a clone of the JVMState that is appropriate to use for driving a parse
    JVMState* old_jvms = call->jvms();
    JVMState* jvms = old_jvms->clone_shallow(C);
    uint size = call->req();
    SafePointNode* map = new SafePointNode(size, jvms);
    for (uint i1 = 0; i1 < size; i1++) {
      map->init_req(i1, call->in(i1));
    }

    PhaseGVN& gvn = *C->initial_gvn();
    // Make sure the state is a MergeMem for parsing.
    if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
      Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
      gvn.set_type_bottom(mem);
      map->set_req(TypeFunc::Memory, mem);
    }

    // blow away old call arguments
    for (uint i1 = TypeFunc::Parms; i1 < r->cnt(); i1++) {
      map->set_req(i1, C->top());
    }
    jvms->set_map(map);

    // Make enough space in the expression stack to transfer
    // the incoming arguments and return value.
    map->ensure_stack(jvms, jvms->method()->max_stack());
    const TypeTuple *domain_sig = call->_tf->domain_sig();
    ExtendedSignature sig_cc = ExtendedSignature(method()->get_sig_cc(), SigEntryFilter());
    uint nargs = method()->arg_size();
    assert(domain_sig->cnt() - TypeFunc::Parms == nargs, "inconsistent signature");

    uint j = TypeFunc::Parms;
    for (uint i1 = 0; i1 < nargs; i1++) {
      const Type* t = domain_sig->field_at(TypeFunc::Parms + i1);
      if (method()->has_scalarized_args() && t->is_valuetypeptr() && !t->maybe_null()) {
        // Value type arguments are not passed by reference: we get an argument per
        // field of the value type. Build ValueTypeNodes from the value type arguments.
        GraphKit arg_kit(jvms, &gvn);
        ValueTypeNode* vt = ValueTypeNode::make_from_multi(&arg_kit, call, sig_cc, t->value_klass(), j, true);
        map->set_control(arg_kit.control());
        map->set_argument(jvms, i1, vt);
      } else {
        map->set_argument(jvms, i1, call->in(j++));
        BasicType bt = t->basic_type();
        while (SigEntry::next_is_reserved(sig_cc, bt, true)) {
          j += type2size[bt]; // Skip reserved arguments
        }
      }
    }

    C->print_inlining_assert_ready();

    C->print_inlining_move_to(this);

    C->log_late_inline(this);

    // This check is done here because for_method_handle_inline() method
    // needs jvms for inlined state.
    if (!do_late_inline_check(jvms)) {
      map->disconnect_inputs(NULL, C);
      return;
    }

    // Allocate a buffer for the returned ValueTypeNode because the caller expects an oop return.
    // Do this before the method handle call in case the buffer allocation triggers deoptimization.
    Node* buffer_oop = NULL;
    if (is_mh_late_inline() && _inline_cg->method()->return_type()->is_valuetype()) {
      GraphKit arg_kit(jvms, &gvn);
      {
        PreserveReexecuteState preexecs(&arg_kit);
        arg_kit.jvms()->set_should_reexecute(true);
        arg_kit.inc_sp(nargs);
        Node* klass_node = arg_kit.makecon(TypeKlassPtr::make(_inline_cg->method()->return_type()->as_value_klass()));
        buffer_oop = arg_kit.new_instance(klass_node, NULL, NULL, /* deoptimize_on_exception */ true);
      }
      jvms = arg_kit.transfer_exceptions_into_jvms();
    }

    // Setup default node notes to be picked up by the inlining
    Node_Notes* old_nn = C->node_notes_at(call->_idx);
    if (old_nn != NULL) {
      Node_Notes* entry_nn = old_nn->clone(C);
      entry_nn->set_jvms(jvms);
      C->set_default_node_notes(entry_nn);
    }

    // Now perform the inlining using the synthesized JVMState
    JVMState* new_jvms = _inline_cg->generate(jvms);
    if (new_jvms == NULL)  return;  // no change
    if (C->failing())      return;

    // Capture any exceptional control flow
    GraphKit kit(new_jvms);

    // Find the result object
    Node* result = C->top();
    int   result_size = method()->return_type()->size();
    if (result_size != 0 && !kit.stopped()) {
      result = (result_size == 1) ? kit.pop() : kit.pop_pair();
    }

    C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
    C->env()->notice_inlined_method(_inline_cg->method());
    C->set_inlining_progress(true);
    C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup

    // Handle value type returns
    bool returned_as_fields = call->tf()->returns_value_type_as_fields();
    if (result->is_ValueType()) {
      // Only possible if is_mh_late_inline() when the callee does not "know" that the caller expects an oop
      assert(is_mh_late_inline() && !returned_as_fields, "sanity");
      assert(buffer_oop != NULL, "should have allocated a buffer");
      ValueTypeNode* vt = result->as_ValueType();
      vt->store(&kit, buffer_oop, buffer_oop, vt->type()->value_klass(), 0);
      result = buffer_oop;
    } else if (result->is_ValueTypePtr() && returned_as_fields) {
      result->as_ValueTypePtr()->replace_call_results(&kit, call, C);
    }

    kit.replace_call(call, result, true);
  }
}


CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}

class LateInlineMHCallGenerator : public LateInlineCallGenerator {
  ciMethod* _caller;
  int _attempt;
  bool _input_not_const;

  virtual bool do_late_inline_check(JVMState* jvms);
  virtual bool already_attempted() const { return _attempt > 0; }

 public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}

  virtual bool is_mh_late_inline() const { return true; }

  virtual JVMState* generate(JVMState* jvms) {
    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);

    Compile* C = Compile::current();
    if (_input_not_const) {
      // inlining won't be possible so no need to enqueue right now.
      call_node()->set_generator(this);
    } else {
      C->add_late_inline(this);
    }
    return new_jvms;
  }
};

bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {

  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const, AlwaysIncrementalInline);

  Compile::current()->print_inlining_update_delayed(this);

  if (!_input_not_const) {
    _attempt++;
  }

  if (cg != NULL && (cg->is_inline() || cg->is_inlined_method_handle_intrinsic(jvms, cg->method()))) {
    assert(!cg->is_late_inline(), "we're doing late inlining");
    _inline_cg = cg;
    Compile::current()->dec_number_of_mh_late_inlines();
    return true;
  }

  call_node()->set_generator(this);
  return false;
}

CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  Compile::current()->inc_number_of_mh_late_inlines();
  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
  return cg;
}

class LateInlineStringCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_string_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_string_late_inline() const { return true; }
};

CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineStringCallGenerator(method, inline_cg);
}

class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_boxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }
};

CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineBoxingCallGenerator(method, inline_cg);
}

//---------------------------WarmCallGenerator--------------------------------
// Internal class which handles initial deferral of inlining decisions.
class WarmCallGenerator : public CallGenerator {
  WarmCallInfo*   _call_info;
  CallGenerator*  _if_cold;
  CallGenerator*  _if_hot;
  bool            _is_virtual;   // caches virtuality of if_cold
  bool            _is_inline;    // caches inline-ness of if_hot

 public:
  WarmCallGenerator(WarmCallInfo* ci,
                    CallGenerator* if_cold,
                    CallGenerator* if_hot)
    : CallGenerator(if_cold->method())
  {
    assert(method() == if_hot->method(), "consistent choices");
    _call_info  = ci;
    _if_cold    = if_cold;
    _if_hot     = if_hot;
    _is_virtual = if_cold->is_virtual();
    _is_inline  = if_hot->is_inline();
  }

  virtual bool      is_inline() const   { return _is_inline; }
  virtual bool      is_virtual() const  { return _is_virtual; }
  virtual bool      is_deferred() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
                                            CallGenerator* if_cold,
                                            CallGenerator* if_hot) {
  return new WarmCallGenerator(ci, if_cold, if_hot);
}

JVMState* WarmCallGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (C->log() != NULL) {
    C->log()->elem("warm_call bci='%d'", jvms->bci());
  }
  jvms = _if_cold->generate(jvms);
  if (jvms != NULL) {
    Node* m = jvms->map()->control();
    if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
    if (m->is_Catch())     m = m->in(0);  else m = C->top();
    if (m->is_Proj())      m = m->in(0);  else m = C->top();
    if (m->is_CallJava()) {
      _call_info->set_call(m->as_Call());
      _call_info->set_hot_cg(_if_hot);
#ifndef PRODUCT
      if (PrintOpto || PrintOptoInlining) {
        tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
        tty->print("WCI: ");
        _call_info->print();
      }
#endif
      _call_info->set_heat(_call_info->compute_heat());
      C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
    }
  }
  return jvms;
}

void WarmCallInfo::make_hot() {
  Unimplemented();
}

void WarmCallInfo::make_cold() {
  // No action:  Just dequeue.
}


//------------------------PredictedCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;
  bool           _exact_check;

 public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, bool exact_check,
                         float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove the extreme values from the range.
    if (hit_prob > PROB_MAX)  hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN)  hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
    _exact_check        = exact_check;
  }

  virtual bool      is_virtual() const  { return true; }
  virtual bool      is_inline() const   { return _if_hit->is_inline(); }
  virtual bool      is_deferred() const { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit,
                                    /*exact_check=*/true, hit_prob);
}

CallGenerator* CallGenerator::for_guarded_call(ciKlass* guarded_receiver,
                                               CallGenerator* if_missed,
                                               CallGenerator* if_hit) {
  return new PredictedCallGenerator(guarded_receiver, if_missed, if_hit,
                                    /*exact_check=*/false, PROB_ALWAYS);
}

JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);
  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_call bci='%d' exact='%d' klass='%d'",
              jvms->bci(), (_exact_check ? 1 : 0), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver_before_call(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  // Make a copy of the replaced nodes in case we need to restore them
  ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
  replaced_nodes.clone();

  Node* casted_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = NULL;
  if (_exact_check) {
    slow_ctl = kit.type_check_receiver(receiver, _predicted_receiver, _hit_prob,
                                       &casted_receiver);
  } else {
    slow_ctl = kit.subtype_check_receiver(receiver, _predicted_receiver,
                                          &casted_receiver);
  }

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms = NULL;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      if (kit.failing())
        return NULL;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != NULL, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // Instance does not exactly match the desired type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // fall through if the instance exactly matches the desired type
  kit.replace_in_map(receiver, casted_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Allocate value types if they are merged with objects (similar to Parse::merge_common())
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
    if (m->is_ValueType() && !t->isa_valuetype()) {
      // Allocate value type in fast path
      m = ValueTypePtrNode::make_from_value_type(&kit, m->as_ValueType());
      kit.map()->set_req(i, m);
    }
    if (n->is_ValueType() && !t->isa_valuetype()) {
      // Allocate value type in slow path
      PreserveJVMState pjvms(&kit);
      kit.set_map(slow_map);
      n = ValueTypePtrNode::make_from_value_type(&kit, n->as_ValueType());
      kit.map()->set_req(i, n);
      slow_map = kit.stop();
    }
  }

  // There are 2 branches and the replaced nodes are only valid on
  // one: restore the replaced nodes to what they were before the
  // branch.
  kit.map()->set_replaced_nodes(replaced_nodes);

  // Finish the diamond.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  // Merge memory
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if( i >= limit ) break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool delayed_forbidden) {
  assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
  bool input_not_const;
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, input_not_const, false);
  Compile* C = Compile::current();
  if (cg != NULL) {
    if (!delayed_forbidden && AlwaysIncrementalInline) {
      return CallGenerator::for_late_inline(callee, cg);
    } else {
      return cg;
    }
  }
  int bci = jvms->bci();
  ciCallProfile profile = caller->call_profile_at_bci(bci);
  int call_site_count = caller->scale_count(profile.count());

  if (IncrementalInline && (AlwaysIncrementalInline ||
      (call_site_count > 0 && (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())))) {
    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
  } else {
    // Out-of-line call.
    return CallGenerator::for_direct_call(callee);
  }
}

static void cast_argument(int nargs, int arg_nb, ciType* t, GraphKit& kit) {
  PhaseGVN& gvn = kit.gvn();
  Node* arg = kit.argument(arg_nb);
  const Type* arg_type = arg->bottom_type();
  const Type* sig_type = TypeOopPtr::make_from_klass(t->as_klass());
  if (arg_type->isa_oopptr() && !arg_type->higher_equal(sig_type)) {
    const Type* narrowed_arg_type = arg_type->join_speculative(sig_type); // keep speculative part
    arg = gvn.transform(new CheckCastPPNode(kit.control(), arg, narrowed_arg_type));
    kit.set_argument(arg_nb, arg);
  }
  if (sig_type->is_valuetypeptr() && !arg->is_ValueType() &&
      !kit.gvn().type(arg)->maybe_null() && t->as_value_klass()->is_scalarizable()) {
    arg = ValueTypeNode::make_from_oop(&kit, arg, t->as_value_klass());
    kit.set_argument(arg_nb, arg);
  }
}

CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const, bool delayed_forbidden) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
        const int vtable_index = Method::invalid_vtable_index;

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "signatures mismatch");
          return NULL;
        }

        CallGenerator* cg = C->call_generator(target, vtable_index,
                                              false /* call_does_dispatch */,
                                              jvms,
                                              true /* allow_inline */,
                                              PROB_ALWAYS,
                                              NULL,
                                              true,
                                              delayed_forbidden);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "receiver not constant");
      }
    }
    break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      int nargs = callee->arg_size();
      // Get MemberName argument:
      Node* member_name = kit.argument(nargs - 1);
      if (member_name->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "signatures mismatch");
          return NULL;
        }

        // In lambda forms we erase signature types to avoid resolving issues
        // involving class loaders.  When we optimize a method handle invoke
        // to a direct call we must cast the receiver and arguments to its
        // actual types.
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          cast_argument(nargs, 0, signature->accessing_klass(), kit);
        }
        // Cast reference arguments to their types.
        for (int i = 0, j = 0; i < signature->count(); i++) {
          ciType* t = signature->type_at(i);
          if (t->is_klass()) {
            cast_argument(nargs, receiver_skip + j, t, kit);
          }
          j += t->size();  // long and double take two slots
        }

        // Try to get the most accurate receiver type
        const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
        const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
        int  vtable_index       = Method::invalid_vtable_index;
        bool call_does_dispatch = false;

        ciKlass* speculative_receiver_type = NULL;
        if (is_virtual_or_interface) {
          ciInstanceKlass* klass = target->holder();
          Node*             receiver_node = kit.argument(0);
          const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
          // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
          // optimize_virtual_call() takes 2 different holder
          // arguments for a corner case that doesn't apply here (see
          // Parse::do_call())
          target = C->optimize_virtual_call(caller, jvms->bci(), klass, klass,
                                            target, receiver_type, is_virtual,
                                            call_does_dispatch, vtable_index,  // out-parameters
                                            false /* check_access */);
          // We lack profiling at this call but type speculation may
          // provide us with a type
          speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
        }
        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
                                              !StressMethodHandleLinkerInlining /* allow_inline */,
                                              PROB_ALWAYS,
                                              speculative_receiver_type,
                                              true,
                                              delayed_forbidden);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "member_name not constant");
      }
    }
    break;

  default:
    fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
    break;
  }
  return NULL;
}


//------------------------PredicatedIntrinsicGenerator------------------------------
// Internal class which handles all predicated Intrinsic calls.
class PredicatedIntrinsicGenerator : public CallGenerator {
  CallGenerator* _intrinsic;
  CallGenerator* _cg;

 public:
  PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
                               CallGenerator* cg)
    : CallGenerator(cg->method())
  {
    _intrinsic = intrinsic;
    _cg        = cg;
  }

  virtual bool      is_virtual() const   { return true; }
  virtual bool      is_inlined() const   { return true; }
  virtual bool      is_intrinsic() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
                                                       CallGenerator* cg) {
  return new PredicatedIntrinsicGenerator(intrinsic, cg);
}


JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
  // The code we want to generate here is:
  //    if (receiver == NULL)
  //      uncommon_Trap
  //    if (predicate(0))
  //      do_intrinsic(0)
  //    else
  //      if (predicate(1))
  //        do_intrinsic(1)
  //      ...
  //      else
  //        do_java_comp

  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicated_intrinsic bci='%d' method='%d'",
              jvms->bci(), log->identify(method()));
  }

  if (!method()->is_static()) {
    // We need an explicit receiver null_check before checking its type in predicate.
    // We share a map with the caller, so his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      return kit.transfer_exceptions_into_jvms();
    }
  }

  int n_predicates = _intrinsic->predicates_count();
  assert(n_predicates > 0, "sanity");

  JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));

  // Region for normal compilation code if intrinsic failed.
  Node* slow_region = new RegionNode(1);

  int results = 0;
  for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
#ifdef ASSERT
    JVMState* old_jvms = kit.jvms();
    SafePointNode* old_map = kit.map();
    Node* old_io  = old_map->i_o();
    Node* old_mem = old_map->memory();
    Node* old_exc = old_map->next_exception();
#endif
    Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
#ifdef ASSERT
    // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
    assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
    SafePointNode* new_map = kit.map();
    assert(old_io  == new_map->i_o(), "generate_predicate should not change i_o");
    assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
    assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
#endif
    if (!kit.stopped()) {
      PreserveJVMState pjvms(&kit);
      // Generate intrinsic code:
      JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
      if (new_jvms == NULL) {
        // Intrinsic failed, use normal compilation path for this predicate.
        slow_region->add_req(kit.control());
      } else {
        kit.add_exception_states_from(new_jvms);
        kit.set_jvms(new_jvms);
        if (!kit.stopped()) {
          result_jvms[results++] = kit.jvms();
        }
      }
    }
    if (else_ctrl == NULL) {
      else_ctrl = kit.C->top();
    }
    kit.set_control(else_ctrl);
  }
  if (!kit.stopped()) {
    // Final 'else' after predicates.
    slow_region->add_req(kit.control());
  }
  if (slow_region->req() > 1) {
    PreserveJVMState pjvms(&kit);
    // Generate normal compilation code:
    kit.set_control(gvn.transform(slow_region));
    JVMState* new_jvms = _cg->generate(kit.sync_jvms());
    if (kit.failing())
      return NULL;  // might happen because of NodeCountInliningCutoff
    assert(new_jvms != NULL, "must be");
    kit.add_exception_states_from(new_jvms);
    kit.set_jvms(new_jvms);
    if (!kit.stopped()) {
      result_jvms[results++] = kit.jvms();
    }
  }

  if (results == 0) {
    // All paths ended in uncommon traps.
    (void) kit.stop();
    return kit.transfer_exceptions_into_jvms();
  }

  if (results == 1) { // Only one path
    kit.set_jvms(result_jvms[0]);
    return kit.transfer_exceptions_into_jvms();
  }

  // Merge all paths.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(results + 1);
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  for (int i = 0; i < results; i++) {
    JVMState* jvms = result_jvms[i];
    int path = i + 1;
    SafePointNode* map = jvms->map();
    region->init_req(path, map->control());
    iophi->set_req(path, map->i_o());
    if (i == 0) {
      kit.set_jvms(jvms);
    } else {
      kit.merge_memory(map->merged_memory(), region, path);
    }
  }
  kit.set_control(gvn.transform(region));
  kit.set_i_o(gvn.transform(iophi));
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }

  // Merge debug info.
  Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  Node* map = kit.map();
  uint limit = map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if( i >= limit ) break;
    }
    Node* n = map->in(i);
    ins[0] = n;
    const Type* t = gvn.type(n);
    bool needs_phi = false;
    for (int j = 1; j < results; j++) {
      JVMState* jvms = result_jvms[j];
      Node* jmap = jvms->map();
      Node* m = NULL;
      if (jmap->req() > i) {
        m = jmap->in(i);
        if (m != n) {
          needs_phi = true;
          t = t->meet_speculative(gvn.type(m));
        }
      }
      ins[j] = m;
    }
    if (needs_phi) {
      Node* phi = PhiNode::make(region, n, t);
      for (int j = 1; j < results; j++) {
        phi->set_req(j + 1, ins[j]);
      }
      map->set_req(i, gvn.transform(phi));
    }
  }

  return kit.transfer_exceptions_into_jvms();
}

//-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which replaces the call with an uncommon trap.
class UncommonTrapCallGenerator : public CallGenerator {
  Deoptimization::DeoptReason _reason;
  Deoptimization::DeoptAction _action;

 public:
  UncommonTrapCallGenerator(ciMethod* m,
                            Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action)
    : CallGenerator(m)
  {
    _reason = reason;
    _action = action;
  }

  virtual bool      is_virtual() const { ShouldNotReachHere(); return false; }
  virtual bool      is_trap() const    { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator*
CallGenerator::for_uncommon_trap(ciMethod* m,
                                 Deoptimization::DeoptReason reason,
                                 Deoptimization::DeoptAction action) {
  return new UncommonTrapCallGenerator(m, reason, action);
}


JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
  // Callsite signature can be different from actual method being called (i.e _linkTo* sites).
  // Use callsite signature always.
  ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
  int nargs = declared_method->arg_size();
  kit.inc_sp(nargs);
  assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
  if (_reason == Deoptimization::Reason_class_check &&
      _action == Deoptimization::Action_maybe_recompile) {
    // Temp fix for 6529811
    // Don't allow uncommon_trap to override our decision to recompile in the event
    // of a class cast failure for a monomorphic call as it will never let us convert
    // the call to either bi-morphic or megamorphic and can lead to unc-trap loops
    bool keep_exact_action = true;
    kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
  } else {
    kit.uncommon_trap(_reason, _action);
  }
  return kit.transfer_exceptions_into_jvms();
}

// (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)

// (Note:  Merged hook_up_exits into ParseGenerator::generate.)

#define NODES_OVERHEAD_PER_METHOD (30.0)
#define NODES_PER_BYTECODE (9.5)

void WarmCallInfo::init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor) {
  int call_count = profile.count();
  int code_size = call_method->code_size();

  // Expected execution count is based on the historical count:
  _count = call_count < 0 ? 1 : call_site->method()->scale_count(call_count, prof_factor);

  // Expected profit from inlining, in units of simple call-overheads.
  _profit = 1.0;

  // Expected work performed by the call in units of call-overheads.
  // %%% need an empirical curve fit for "work" (time in call)
  float bytecodes_per_call = 3;
  _work = 1.0 + code_size / bytecodes_per_call;

  // Expected size of compilation graph:
  // -XX:+PrintParseStatistics once reported:
  //  Methods seen: 9184  Methods parsed: 9184  Nodes created: 1582391
  //  Histogram of 144298 parsed bytecodes:
  // %%% Need a better predictor for graph size.
  _size = NODES_OVERHEAD_PER_METHOD + (NODES_PER_BYTECODE * code_size);
}

// is_cold:  Return true if the node should never be inlined.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_cold() const {
  if (count()  <  WarmCallMinCount)   return true;
  if (profit() <  WarmCallMinProfit)  return true;
  if (work()   >  WarmCallMaxWork)    return true;
  if (size()   >  WarmCallMaxSize)    return true;
  return false;
}

// is_hot:  Return true if the node should be inlined immediately.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_hot() const {
  assert(!is_cold(), "eliminate is_cold cases before testing is_hot");
  if (count()  >= HotCallCountThreshold)   return true;
  if (profit() >= HotCallProfitThreshold)  return true;
  if (work()   <= HotCallTrivialWork)      return true;
  if (size()   <= HotCallTrivialSize)      return true;
  return false;
}

// compute_heat:
float WarmCallInfo::compute_heat() const {
  assert(!is_cold(), "compute heat only on warm nodes");
  assert(!is_hot(),  "compute heat only on warm nodes");
  int min_size = MAX2(0,   (int)HotCallTrivialSize);
  int max_size = MIN2(500, (int)WarmCallMaxSize);
  float method_size = (size() - min_size) / MAX2(1, max_size - min_size);
  float size_factor;
  if      (method_size < 0.05)  size_factor = 4;   // 2 sigmas better than avg.
  else if (method_size < 0.15)  size_factor = 2;   // 1 sigma better than avg.
  else if (method_size < 0.5)   size_factor = 1;   // better than avg.
  else                          size_factor = 0.5; // worse than avg.
  return (count() * profit() * size_factor);
}

bool WarmCallInfo::warmer_than(WarmCallInfo* that) {
  assert(this != that, "compare only different WCIs");
  assert(this->heat() != 0 && that->heat() != 0, "call compute_heat 1st");
  if (this->heat() > that->heat())   return true;
  if (this->heat() < that->heat())   return false;
  assert(this->heat() == that->heat(), "no NaN heat allowed");
  // Equal heat.  Break the tie some other way.
  if (!this->call() || !that->call())  return (address)this > (address)that;
  return this->call()->_idx > that->call()->_idx;
}

//#define UNINIT_NEXT ((WarmCallInfo*)badAddress)
#define UNINIT_NEXT ((WarmCallInfo*)NULL)

WarmCallInfo* WarmCallInfo::insert_into(WarmCallInfo* head) {
  assert(next() == UNINIT_NEXT, "not yet on any list");
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != NULL && next_p->warmer_than(this)) {
    prev_p = next_p;
    next_p = prev_p->next();
  }
  // Install this between prev_p and next_p.
  this->set_next(next_p);
  if (prev_p == NULL)
    head = this;
  else
    prev_p->set_next(this);
  return head;
}

WarmCallInfo* WarmCallInfo::remove_from(WarmCallInfo* head) {
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != this) {
    assert(next_p != NULL, "this must be in the list somewhere");
    prev_p = next_p;
    next_p = prev_p->next();
  }
  next_p = this->next();
  debug_only(this->set_next(UNINIT_NEXT));
  // Remove this from between prev_p and next_p.
  if (prev_p == NULL)
    head = next_p;
  else
    prev_p->set_next(next_p);
  return head;
}

WarmCallInfo WarmCallInfo::_always_hot(WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE(),
                                       WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE());
WarmCallInfo WarmCallInfo::_always_cold(WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE(),
                                        WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE());

WarmCallInfo* WarmCallInfo::always_hot() {
  assert(_always_hot.is_hot(), "must always be hot");
  return &_always_hot;
}

WarmCallInfo* WarmCallInfo::always_cold() {
  assert(_always_cold.is_cold(), "must always be cold");
  return &_always_cold;
}


#ifndef PRODUCT

void WarmCallInfo::print() const {
  tty->print("%s : C=%6.1f P=%6.1f W=%6.1f S=%6.1f H=%6.1f -> %p",
             is_cold() ? "cold" : is_hot() ? "hot " : "warm",
             count(), profit(), work(), size(), compute_heat(), next());
  tty->cr();
  if (call() != NULL)  call()->dump();
}

void print_wci(WarmCallInfo* ci) {
  ci->print();
}

void WarmCallInfo::print_all() const {
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    p->print();
}

int WarmCallInfo::count_all() const {
  int cnt = 0;
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    cnt++;
  return cnt;
}

#endif //PRODUCT