/*
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "opto/valuetypenode.hpp"
#include "runtime/sharedRuntime.hpp"

// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

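// Returns true when the call site's symbolic target is a method handle
// intrinsic (MH.linkTo*/invokeBasic) but the callee being generated is a
// concrete method, i.e. the method handle invocation has been inlined
// down to a direct call.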
bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* callee) {
  ciMethod* symbolic_info = jvms->method()->get_method_at_bci(jvms->bci());
  return symbolic_info->is_method_handle_intrinsic() && !callee->is_method_handle_intrinsic();
}

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
 private:
  bool  _is_osr;
  float _expected_uses;

 public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
  }

  virtual bool      is_parse() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
  int is_osr() { return _is_osr; }
};

JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  // Grab signature for matching/allocation
#ifdef ASSERT
  if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
    MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
    assert(C->env()->system_dictionary_modification_counter_changed(),
           "Must invalidate if TypeFuncs differ");
  }
#endif

  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
 private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool                _separate_io_proj;

 public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _separate_io_proj(separate_io_proj)
  {
    if (method->is_method_handle_intrinsic() &&
        method->signature()->return_type() == ciEnv::current()->___Value_klass()) {
      // If that call has not been optimized by the time optimizations
      // are over, we'll need to add a call to create a value type
      // instance from the klass returned by the call.  Separating
      // memory and I/O projections for exceptions is required to
      // perform that graph transformation.
      _separate_io_proj = true;
    }
  }
  virtual JVMState* generate(JVMState* jvms);

  CallStaticJavaNode* call_node() const { return _call_node; }
};
JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  PhaseGVN& gvn = kit.gvn();
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode *call = new CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
    // additional information about the method being invoked should be attached
    // to the call site to make resolution logic work
    // (see SharedRuntime::resolve_static_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later
  if (!is_static) {
    if (!kit.argument(0)->is_ValueType()) {
      // Make an explicit receiver null_check as part of this call.
      // Since we share a map with the caller, his JVMS gets adjusted.
      kit.null_check_receiver_before_call(method());
    }
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_intrinsic() ||
        method()->is_compiled_lambda_form()) {
      call->set_method_handle_invoke(true);
    }
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  // Check if return value is a value type pointer
  const TypeValueTypePtr* vtptr = gvn.type(ret)->isa_valuetypeptr();
  if (vtptr != NULL) {
    if (vtptr->klass() != kit.C->env()->___Value_klass()) {
      // Create ValueTypeNode from the oop and replace the return value
      Node* vt = ValueTypeNode::make(gvn, kit.merged_memory(), ret);
      kit.push_node(T_VALUETYPE, vt);
    } else {
      kit.push_node(T_VALUETYPE, ret);
    }
  } else {
    kit.push_node(method()->return_type()->basic_type(), ret);
  }
  return kit.transfer_exceptions_into_jvms();
}

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
 private:
  int _vtable_index;
 public:
  VirtualCallGenerator(ciMethod* method, int vtable_index)
    : CallGenerator(method), _vtable_index(vtable_index)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);
  PhaseGVN& gvn = kit.gvn();
  kit.C->print_inlining_update(this);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it.  The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
    ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
    int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
    kit.inc_sp(arg_size);  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod *caller = kit.method();
  ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
      ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
        >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches ");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode *call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call (optimized virtual or virtual)
    // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
    // about the method being invoked should be attached to the call site to
    // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
    call->set_override_symbolic_info(true);
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  // Check if return value is a value type pointer
  if (gvn.type(ret)->isa_valuetypeptr()) {
    // Create ValueTypeNode from the oop and replace the return value
    Node* vt = ValueTypeNode::make(gvn, kit.merged_memory(), ret);
    kit.push_node(T_VALUETYPE, vt);
  } else {
    kit.push_node(method()->return_type()->basic_type(), ret);
  }

  // Represent the effect of an implicit receiver null_check
  // as part of this call.  Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller.  Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index);
}

// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
 private:
  // unique id for log compilation
  jlong _unique_id;

 protected:
  CallGenerator* _inline_cg;
  virtual bool do_late_inline_check(JVMState* jvms) { return true; }

 public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    DirectCallGenerator(method, true), _inline_cg(inline_cg), _unique_id(0) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    // Record that this call site should be revisited once the main
    // parse is finished.
    if (!is_mh_late_inline()) {
      C->add_late_inline(this);
    }

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

  virtual void print_inlining_late(const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_assert_ready();
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
    C->print_inlining_move_to(this);
    C->print_inlining_update_delayed(this);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }
};

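// Replace the CallStaticJava node emitted by DirectCallGenerator::generate()
// with the inlined method body: synthesize a fresh SafePointNode and JVMState
// from the call's inputs, transfer the arguments (re-wrapping value types as
// needed), and drive the wrapped inline CallGenerator over that state.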
void LateInlineCallGenerator::do_late_inline() {
  // Can't inline it
  CallStaticJavaNode* call = call_node();
  if (call == NULL || call->outcnt() == 0 ||
      call->in(0) == NULL || call->in(0)->is_top()) {
    return;
  }

  const TypeTuple *r = call->tf()->domain_cc();
  for (int i1 = 0; i1 < method()->arg_size(); i1++) {
    if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }

  Compile* C = Compile::current();
  // Remove inlined methods from Compiler's lists.
  if (call->is_macro()) {
    C->remove_macro_node(call);
  }

  // Make a clone of the JVMState that is appropriate to use for driving a parse
  JVMState* old_jvms = call->jvms();
  JVMState* jvms = old_jvms->clone_shallow(C);
  uint size = call->req();
  SafePointNode* map = new SafePointNode(size, jvms);
  for (uint i1 = 0; i1 < size; i1++) {
    map->init_req(i1, call->in(i1));
  }

  PhaseGVN& gvn = *C->initial_gvn();
  // Make sure the state is a MergeMem for parsing.
  if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
    Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
    gvn.set_type_bottom(mem);
    map->set_req(TypeFunc::Memory, mem);
  }

  // blow away old call arguments
  Node* top = C->top();
  for (uint i1 = TypeFunc::Parms; i1 < call->_tf->domain_cc()->cnt(); i1++) {
    map->set_req(i1, top);
  }
  jvms->set_map(map);

  // Make enough space in the expression stack to transfer
  // the incoming arguments and return value.
  map->ensure_stack(jvms, jvms->method()->max_stack());
  const TypeTuple *domain_sig = call->_tf->domain_sig();
  uint nargs = method()->arg_size();
  assert(domain_sig->cnt() - TypeFunc::Parms == nargs, "inconsistent signature");

  uint j = TypeFunc::Parms;
  for (uint i1 = 0; i1 < nargs; i1++) {
    const Type* t = domain_sig->field_at(TypeFunc::Parms + i1);
    if (!ValueTypePassFieldsAsArgs) {
      Node* arg = call->in(TypeFunc::Parms + i1);
      if (t->isa_valuetypeptr()) {
        arg = ValueTypeNode::make(gvn, map->memory(), arg);
      }
      map->set_argument(jvms, i1, arg);
    } else {
      if (t->isa_valuetypeptr() && t->is_valuetypeptr()->klass() != C->env()->___Value_klass()) {
        ciValueKlass* vk = t->is_valuetypeptr()->value_type()->value_klass();
        Node* vt = ValueTypeNode::make(gvn, call, vk, j, true);
        map->set_argument(jvms, i1, gvn.transform(vt));
        j += vk->value_arg_slots();
      } else {
        map->set_argument(jvms, i1, call->in(j));
        j++;
      }
    }
  }

  C->print_inlining_assert_ready();

  C->print_inlining_move_to(this);

  C->log_late_inline(this);

  // This check is done here because for_method_handle_inline() method
  // needs jvms for inlined state.
  if (!do_late_inline_check(jvms)) {
    map->disconnect_inputs(NULL, C);
    return;
  }

  // Setup default node notes to be picked up by the inlining
  Node_Notes* old_nn = C->node_notes_at(call->_idx);
  if (old_nn != NULL) {
    Node_Notes* entry_nn = old_nn->clone(C);
    entry_nn->set_jvms(jvms);
    C->set_default_node_notes(entry_nn);
  }

  // Now perform the inlining using the synthesized JVMState
  JVMState* new_jvms = _inline_cg->generate(jvms);
  if (new_jvms == NULL)  return;  // no change
  if (C->failing())      return;

  // Capture any exceptional control flow
  GraphKit kit(new_jvms);

  // Find the result object
  Node* result = C->top();
  ciType* return_type = _inline_cg->method()->return_type();
  int result_size = return_type->size();
  if (result_size != 0 && !kit.stopped()) {
    result = (result_size == 1) ? kit.pop() : kit.pop_pair();
  }

  C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
  C->env()->notice_inlined_method(_inline_cg->method());
  C->set_inlining_progress(true);

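  // If the inlined method returns a value type, rewire the call's result
  // projections to match: either buffer the value and publish the oop, or,
  // when the calling convention returns the fields in registers, scatter
  // the field values over the call's projections.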
  if (return_type->is_valuetype()) {
    const Type* vt_t = call->_tf->range_sig()->field_at(TypeFunc::Parms);
    if (result->is_ValueType()) {
      ValueTypeNode* vt = result->as_ValueType();
      if (!call->tf()->returns_value_type_as_fields()) {
        result = vt->allocate(&kit);
        result = C->initial_gvn()->transform(new ValueTypePtrNode(vt, result, C));
      } else {
        // Return of multiple values (the fields of a value type)
        vt->replace_call_results(call, C);
        if (gvn.type(vt->get_oop()) == TypePtr::NULL_PTR) {
          result = vt->tagged_klass(gvn);
        } else {
          result = vt->get_oop();
        }
      }
    } else {
      if (vt_t->is_valuetypeptr()->value_type()->value_klass() != C->env()->___Value_klass()) {
        if (gvn.type(result)->isa_valuetypeptr() && call->tf()->returns_value_type_as_fields()) {
          Node* cast = new CheckCastPPNode(NULL, result, vt_t);
          gvn.record_for_igvn(cast);
          ValueTypePtrNode* vtptr = ValueTypePtrNode::make(gvn, kit.merged_memory(), gvn.transform(cast));
          vtptr->replace_call_results(call, C);
          result = cast;
        } else {
          assert(result->is_top(), "what else?");
          for (DUIterator_Fast imax, i = call->fast_outs(imax); i < imax; i++) {
            ProjNode *pn = call->fast_out(i)->as_Proj();
            uint con = pn->_con;
            if (con >= TypeFunc::Parms) {
              C->initial_gvn()->hash_delete(pn);
              pn->set_req(0, C->top());
              --i; --imax;
            }
          }
        }
      }
    }
  }

  kit.replace_call(call, result, true);
}


CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}

class LateInlineMHCallGenerator : public LateInlineCallGenerator {
  ciMethod* _caller;
  int _attempt;
  bool _input_not_const;

  virtual bool do_late_inline_check(JVMState* jvms);
  virtual bool already_attempted() const { return _attempt > 0; }

 public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}

  virtual bool is_mh_late_inline() const { return true; }

  virtual JVMState* generate(JVMState* jvms) {
    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);

    Compile* C = Compile::current();
    if (_input_not_const) {
      // inlining won't be possible so no need to enqueue right now.
      call_node()->set_generator(this);
    } else {
      C->add_late_inline(this);
    }
    return new_jvms;
  }
};

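// Check whether the method handle chain can now be inlined: retry
// for_method_handle_inline() on the synthesized JVMState. On success the
// resulting generator is installed as _inline_cg; otherwise the call keeps
// this generator attached so a later incremental-inlining pass can retry.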
bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {

  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const, AlwaysIncrementalInline);

  Compile::current()->print_inlining_update_delayed(this);

  if (!_input_not_const) {
    _attempt++;
  }

  if (cg != NULL && (cg->is_inline() || cg->is_inlined_method_handle_intrinsic(jvms, cg->method()))) {
    assert(!cg->is_late_inline(), "we're doing late inlining");
    _inline_cg = cg;
    Compile::current()->dec_number_of_mh_late_inlines();
    return true;
  }

  call_node()->set_generator(this);
  return false;
}

CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  Compile::current()->inc_number_of_mh_late_inlines();
  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
  return cg;
}

class LateInlineStringCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_string_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_string_late_inline() const { return true; }
};

CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineStringCallGenerator(method, inline_cg);
}

class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_boxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }
};

CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineBoxingCallGenerator(method, inline_cg);
}

//---------------------------WarmCallGenerator--------------------------------
// Internal class which handles initial deferral of inlining decisions.
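// A "warm" call site is neither hot enough to inline immediately nor cold
// enough to reject outright: generate() emits the out-of-line version now
// (_if_cold) and queues a WarmCallInfo, ordered by heat, so the call can
// later be upgraded to the inlined version (_if_hot).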
class WarmCallGenerator : public CallGenerator {
  WarmCallInfo*   _call_info;
  CallGenerator*  _if_cold;
  CallGenerator*  _if_hot;
  bool            _is_virtual;   // caches virtuality of if_cold
  bool            _is_inline;    // caches inline-ness of if_hot

 public:
  WarmCallGenerator(WarmCallInfo* ci,
                    CallGenerator* if_cold,
                    CallGenerator* if_hot)
    : CallGenerator(if_cold->method())
  {
    assert(method() == if_hot->method(), "consistent choices");
    _call_info  = ci;
    _if_cold    = if_cold;
    _if_hot     = if_hot;
    _is_virtual = if_cold->is_virtual();
    _is_inline  = if_hot->is_inline();
  }

  virtual bool      is_inline() const   { return _is_inline; }
  virtual bool      is_virtual() const  { return _is_virtual; }
  virtual bool      is_deferred() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
                                            CallGenerator* if_cold,
                                            CallGenerator* if_hot) {
  return new WarmCallGenerator(ci, if_cold, if_hot);
}

JVMState* WarmCallGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (C->log() != NULL) {
    C->log()->elem("warm_call bci='%d'", jvms->bci());
  }
  jvms = _if_cold->generate(jvms);
  if (jvms != NULL) {
    Node* m = jvms->map()->control();
    if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
    if (m->is_Catch())     m = m->in(0);  else m = C->top();
    if (m->is_Proj())      m = m->in(0);  else m = C->top();
    if (m->is_CallJava()) {
      _call_info->set_call(m->as_Call());
      _call_info->set_hot_cg(_if_hot);
#ifndef PRODUCT
      if (PrintOpto || PrintOptoInlining) {
        tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
        tty->print("WCI: ");
        _call_info->print();
      }
#endif
      _call_info->set_heat(_call_info->compute_heat());
      C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
    }
  }
  return jvms;
}

void WarmCallInfo::make_hot() {
  Unimplemented();
}

void WarmCallInfo::make_cold() {
  // No action:  Just dequeue.
}


//------------------------PredictedCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;

 public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove the extreme values from the range.
    if (hit_prob > PROB_MAX) hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN) hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
  }

  virtual bool      is_virtual() const  { return true; }
  virtual bool      is_inline() const   { return _if_hit->is_inline(); }
  virtual bool      is_deferred() const { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit, hit_prob);
}


JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);
  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_call bci='%d' klass='%d'",
              jvms->bci(), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver_before_call(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  // Make a copy of the replaced nodes in case we need to restore them
  ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
  replaced_nodes.clone();

  Node* exact_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = kit.type_check_receiver(receiver,
                                           _predicted_receiver, _hit_prob,
                                           &exact_receiver);

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms = NULL;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      if (kit.failing())
        return NULL;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != NULL, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // Instance does not exactly match the desired type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // fall through if the instance exactly matches the desired type
  kit.replace_in_map(receiver, exact_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // There are 2 branches and the replaced nodes are only valid on
  // one: restore the replaced nodes to what they were before the
  // branch.
  kit.map()->set_replaced_nodes(replaced_nodes);

  // Finish the diamond.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  // Merge memory
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool delayed_forbidden) {
  assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
  bool input_not_const;
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, input_not_const, false);
  Compile* C = Compile::current();
  if (cg != NULL) {
    if (!delayed_forbidden && AlwaysIncrementalInline) {
      return CallGenerator::for_late_inline(callee, cg);
    } else {
      return cg;
    }
  }
  int bci = jvms->bci();
  ciCallProfile profile = caller->call_profile_at_bci(bci);
  int call_site_count = caller->scale_count(profile.count());

  if (IncrementalInline && (AlwaysIncrementalInline ||
      (call_site_count > 0 && (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())))) {
    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
  } else {
    // Out-of-line call.
    return CallGenerator::for_direct_call(callee);
  }
}

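// Cast the argument at position arg_nb to the type given by the target's
// signature. Lambda forms erase reference types to Object, so the sharper
// signature type is reimposed here; value type arguments are additionally
// re-wrapped as ValueTypeNodes.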
static void cast_argument(int arg_nb, ciType* t, GraphKit& kit) {
  PhaseGVN& gvn = kit.gvn();
  Node* arg = kit.argument(arg_nb);
  const Type* arg_type = arg->bottom_type();
  const Type* sig_type = TypeOopPtr::make_from_klass(t->as_klass());
  if (t->is_valuetype()) {
    assert(!(arg_type->isa_valuetype() && t == kit.C->env()->___Value_klass()), "need a pointer to the value type");
    if (arg_type->isa_valuetypeptr() && t != kit.C->env()->___Value_klass()) {
      Node* cast = gvn.transform(new CheckCastPPNode(kit.control(), arg, sig_type));
      Node* vt = ValueTypeNode::make(gvn, kit.merged_memory(), cast);
      kit.set_argument(arg_nb, vt);
    } else {
      assert(t == kit.C->env()->___Value_klass() || arg->is_ValueType(), "inconsistent argument");
    }
  } else {
    if (arg_type->isa_oopptr() && !arg_type->higher_equal(sig_type)) {
      Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, sig_type));
      kit.set_argument(arg_nb, cast_obj);
    }
  }
}

CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const, bool delayed_forbidden) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
        const int vtable_index = Method::invalid_vtable_index;

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "signatures mismatch");
          return NULL;
        }

        CallGenerator* cg = C->call_generator(target, vtable_index,
                                              false /* call_does_dispatch */,
                                              jvms,
                                              true /* allow_inline */,
                                              PROB_ALWAYS,
                                              NULL,
                                              true,
                                              delayed_forbidden);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "receiver not constant");
      }
    }
    break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      // Get MemberName argument:
      Node* member_name = kit.argument(callee->arg_size() - 1);
      if (member_name->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "signatures mismatch");
          return NULL;
        }

        // In lambda forms we erase signature types to avoid resolving issues
        // involving class loaders.  When we optimize a method handle invoke
        // to a direct call we must cast the receiver and arguments to their
        // actual types.
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          cast_argument(0, signature->accessing_klass(), kit);
        }
        // Cast reference arguments to their types.
        for (int i = 0, j = 0; i < signature->count(); i++) {
          ciType* t = signature->type_at(i);
          if (t->is_klass()) {
            cast_argument(receiver_skip + j, t, kit);
          }
          j += t->size();  // long and double take two slots
        }

        // Try to get the most accurate receiver type
        const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
        const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
        int  vtable_index       = Method::invalid_vtable_index;
        bool call_does_dispatch = false;

        ciKlass* speculative_receiver_type = NULL;
        if (is_virtual_or_interface) {
          ciInstanceKlass* klass = target->holder();
          Node*             receiver_node = kit.argument(0);
          const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
          // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
          // optimize_virtual_call() takes 2 different holder
          // arguments for a corner case that doesn't apply here (see
          // Parse::do_call())
          target = C->optimize_virtual_call(caller, jvms->bci(), klass, klass,
                                            target, receiver_type, is_virtual,
                                            call_does_dispatch, vtable_index,  // out-parameters
                                            false /* check_access */);
          // We lack profiling at this call but type speculation may
          // provide us with a type
          speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
        }
        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
                                              true /* allow_inline */,
                                              PROB_ALWAYS,
                                              speculative_receiver_type,
                                              true,
                                              delayed_forbidden);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "member_name not constant");
      }
    }
    break;

  default:
    fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
    break;
  }
  return NULL;
}


//------------------------PredicatedIntrinsicGenerator------------------------------
// Internal class which handles all predicated Intrinsic calls.
class PredicatedIntrinsicGenerator : public CallGenerator {
  CallGenerator* _intrinsic;
  CallGenerator* _cg;

 public:
  PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
                               CallGenerator* cg)
    : CallGenerator(cg->method())
  {
    _intrinsic = intrinsic;
    _cg        = cg;
  }

  virtual bool      is_virtual() const   { return true; }
  virtual bool      is_inlined() const   { return true; }
  virtual bool      is_intrinsic() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
                                                       CallGenerator* cg) {
  return new PredicatedIntrinsicGenerator(intrinsic, cg);
}


JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
  // The code we want to generate here is:
  //    if (receiver == NULL)
  //      uncommon_trap
  //    if (predicate(0))
  //      do_intrinsic(0)
  //    else
  //      if (predicate(1))
  //        do_intrinsic(1)
  //    ...
  //    else
  //      do_java_comp

  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicated_intrinsic bci='%d' method='%d'",
              jvms->bci(), log->identify(method()));
  }

  if (!method()->is_static()) {
    // We need an explicit receiver null_check before checking its type in the predicate.
    // We share a map with the caller, so his JVMS gets adjusted.
    Node* receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      return kit.transfer_exceptions_into_jvms();
    }
  }

  int n_predicates = _intrinsic->predicates_count();
  assert(n_predicates > 0, "sanity");

  JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));

  // Region for normal compilation code if intrinsic failed.
  Node* slow_region = new RegionNode(1);

  int results = 0;
  for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
#ifdef ASSERT
    JVMState* old_jvms = kit.jvms();
    SafePointNode* old_map = kit.map();
    Node* old_io  = old_map->i_o();
    Node* old_mem = old_map->memory();
    Node* old_exc = old_map->next_exception();
#endif
    Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
#ifdef ASSERT
    // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
    assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
    SafePointNode* new_map = kit.map();
    assert(old_io  == new_map->i_o(), "generate_predicate should not change i_o");
    assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
    assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
#endif
    if (!kit.stopped()) {
      PreserveJVMState pjvms(&kit);
      // Generate intrinsic code:
      JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
      if (new_jvms == NULL) {
        // Intrinsic failed, use normal compilation path for this predicate.
        slow_region->add_req(kit.control());
      } else {
        kit.add_exception_states_from(new_jvms);
        kit.set_jvms(new_jvms);
        if (!kit.stopped()) {
          result_jvms[results++] = kit.jvms();
        }
      }
    }
    if (else_ctrl == NULL) {
      else_ctrl = kit.C->top();
    }
    kit.set_control(else_ctrl);
  }
  if (!kit.stopped()) {
    // Final 'else' after predicates.
    slow_region->add_req(kit.control());
  }
  if (slow_region->req() > 1) {
    PreserveJVMState pjvms(&kit);
    // Generate normal compilation code:
    kit.set_control(gvn.transform(slow_region));
    JVMState* new_jvms = _cg->generate(kit.sync_jvms());
    if (kit.failing())
      return NULL;  // might happen because of NodeCountInliningCutoff
    assert(new_jvms != NULL, "must be");
    kit.add_exception_states_from(new_jvms);
    kit.set_jvms(new_jvms);
    if (!kit.stopped()) {
      result_jvms[results++] = kit.jvms();
    }
  }

  if (results == 0) {
    // All paths ended in uncommon traps.
    (void) kit.stop();
    return kit.transfer_exceptions_into_jvms();
  }

  if (results == 1) { // Only one path
    kit.set_jvms(result_jvms[0]);
    return kit.transfer_exceptions_into_jvms();
  }

  // Merge all paths.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(results + 1);
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  for (int i = 0; i < results; i++) {
    JVMState* jvms = result_jvms[i];
    int path = i + 1;
    SafePointNode* map = jvms->map();
    region->init_req(path, map->control());
    iophi->set_req(path, map->i_o());
    if (i == 0) {
      kit.set_jvms(jvms);
    } else {
      kit.merge_memory(map->merged_memory(), region, path);
    }
  }
  kit.set_control(gvn.transform(region));
  kit.set_i_o(gvn.transform(iophi));
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }

  // Merge debug info.
  Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  Node* map = kit.map();
  uint limit = map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* n = map->in(i);
    ins[0] = n;
    const Type* t = gvn.type(n);
    bool needs_phi = false;
    for (int j = 1; j < results; j++) {
      JVMState* jvms = result_jvms[j];
      Node* jmap = jvms->map();
      Node* m = NULL;
      if (jmap->req() > i) {
        m = jmap->in(i);
        if (m != n) {
          needs_phi = true;
          t = t->meet_speculative(gvn.type(m));
        }
      }
      ins[j] = m;
    }
    if (needs_phi) {
      Node* phi = PhiNode::make(region, n, t);
      for (int j = 1; j < results; j++) {
        phi->set_req(j + 1, ins[j]);
      }
      map->set_req(i, gvn.transform(phi));
    }
  }

  return kit.transfer_exceptions_into_jvms();
}

//-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which handles call sites that are known to end in an
// uncommon trap rather than a real call.
class UncommonTrapCallGenerator : public CallGenerator {
  Deoptimization::DeoptReason _reason;
  Deoptimization::DeoptAction _action;

 public:
  UncommonTrapCallGenerator(ciMethod* m,
                            Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action)
    : CallGenerator(m)
  {
    _reason = reason;
    _action = action;
  }

  virtual bool      is_virtual() const { ShouldNotReachHere(); return false; }
  virtual bool      is_trap() const    { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator*
CallGenerator::for_uncommon_trap(ciMethod* m,
                                 Deoptimization::DeoptReason reason,
                                 Deoptimization::DeoptAction action) {
  return new UncommonTrapCallGenerator(m, reason, action);
}


JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
  // Callsite signature can be different from actual method being called (i.e. _linkTo* sites).
  // Use callsite signature always.
  ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
  int nargs = declared_method->arg_size();
  kit.inc_sp(nargs);
  assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
  if (_reason == Deoptimization::Reason_class_check &&
      _action == Deoptimization::Action_maybe_recompile) {
    // Temp fix for 6529811
    // Don't allow uncommon_trap to override our decision to recompile in the event
    // of a class cast failure for a monomorphic call as it will never let us convert
    // the call to either bi-morphic or megamorphic and can lead to unc-trap loops
    bool keep_exact_action = true;
    kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
  } else {
    kit.uncommon_trap(_reason, _action);
  }
  return kit.transfer_exceptions_into_jvms();
}

// (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)

// (Note:  Merged hook_up_exits into ParseGenerator::generate.)

#define NODES_OVERHEAD_PER_METHOD (30.0)
#define NODES_PER_BYTECODE (9.5)

void WarmCallInfo::init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor) {
  int call_count = profile.count();
  int code_size = call_method->code_size();

  // Expected execution count is based on the historical count:
  _count = call_count < 0 ? 1 : call_site->method()->scale_count(call_count, prof_factor);

  // Expected profit from inlining, in units of simple call-overheads.
  _profit = 1.0;

  // Expected work performed by the call in units of call-overheads.
  // %%% need an empirical curve fit for "work" (time in call)
  float bytecodes_per_call = 3;
  _work = 1.0 + code_size / bytecodes_per_call;

  // Expected size of compilation graph:
  // -XX:+PrintParseStatistics once reported:
  //  Methods seen: 9184  Methods parsed: 9184  Nodes created: 1582391
  //  Histogram of 144298 parsed bytecodes:
  // %%% Need a better predictor for graph size.
  _size = NODES_OVERHEAD_PER_METHOD + (NODES_PER_BYTECODE * code_size);
}

// is_cold:  Return true if the node should never be inlined.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_cold() const {
  if (count()  <  WarmCallMinCount)   return true;
  if (profit() <  WarmCallMinProfit)  return true;
  if (work()   >  WarmCallMaxWork)    return true;
  if (size()   >  WarmCallMaxSize)    return true;
  return false;
}

// is_hot:  Return true if the node should be inlined immediately.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_hot() const {
  assert(!is_cold(), "eliminate is_cold cases before testing is_hot");
  if (count()  >= HotCallCountThreshold)   return true;
  if (profit() >= HotCallProfitThreshold)  return true;
  if (work()   <= HotCallTrivialWork)      return true;
  if (size()   <= HotCallTrivialSize)      return true;
  return false;
}

// compute_heat:
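// Heat is count * profit, scaled by how small the method is relative to the
// [HotCallTrivialSize, min(500, WarmCallMaxSize)] band.  For illustration
// (flag values here are hypothetical): with min_size = 10 and max_size = 500,
// a call with count = 100, profit = 1.0 and size = 300 has
// method_size = (300 - 10) / 490 ~= 0.59, so size_factor = 0.5 ("worse than
// avg.") and heat = 100 * 1.0 * 0.5 = 50.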
float WarmCallInfo::compute_heat() const {
  assert(!is_cold(), "compute heat only on warm nodes");
  assert(!is_hot(),  "compute heat only on warm nodes");
  int min_size = MAX2(0,   (int)HotCallTrivialSize);
  int max_size = MIN2(500, (int)WarmCallMaxSize);
  float method_size = (size() - min_size) / MAX2(1, max_size - min_size);
  float size_factor;
  if      (method_size < 0.05)  size_factor = 4;    // 2 sigmas better than avg.
  else if (method_size < 0.15)  size_factor = 2;    // 1 sigma better than avg.
  else if (method_size < 0.5)   size_factor = 1;    // better than avg.
  else                          size_factor = 0.5;  // worse than avg.
  return (count() * profit() * size_factor);
}

bool WarmCallInfo::warmer_than(WarmCallInfo* that) {
  assert(this != that, "compare only different WCIs");
  assert(this->heat() != 0 && that->heat() != 0, "call compute_heat 1st");
  if (this->heat() > that->heat())  return true;
  if (this->heat() < that->heat())  return false;
  assert(this->heat() == that->heat(), "no NaN heat allowed");
  // Equal heat.  Break the tie some other way.
  if (!this->call() || !that->call())  return (address)this > (address)that;
  return this->call()->_idx > that->call()->_idx;
}

//#define UNINIT_NEXT ((WarmCallInfo*)badAddress)
#define UNINIT_NEXT ((WarmCallInfo*)NULL)

WarmCallInfo* WarmCallInfo::insert_into(WarmCallInfo* head) {
  assert(next() == UNINIT_NEXT, "not yet on any list");
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != NULL && next_p->warmer_than(this)) {
    prev_p = next_p;
    next_p = prev_p->next();
  }
  // Install this between prev_p and next_p.
  this->set_next(next_p);
  if (prev_p == NULL)
    head = this;
  else
    prev_p->set_next(this);
  return head;
}

WarmCallInfo* WarmCallInfo::remove_from(WarmCallInfo* head) {
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != this) {
    assert(next_p != NULL, "this must be in the list somewhere");
    prev_p = next_p;
    next_p = prev_p->next();
  }
  next_p = this->next();
  debug_only(this->set_next(UNINIT_NEXT));
  // Remove this from between prev_p and next_p.
  if (prev_p == NULL)
    head = next_p;
  else
    prev_p->set_next(next_p);
  return head;
}

WarmCallInfo WarmCallInfo::_always_hot(WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE(),
                                       WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE());
WarmCallInfo WarmCallInfo::_always_cold(WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE(),
                                        WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE());

WarmCallInfo* WarmCallInfo::always_hot() {
  assert(_always_hot.is_hot(), "must always be hot");
  return &_always_hot;
}

WarmCallInfo* WarmCallInfo::always_cold() {
  assert(_always_cold.is_cold(), "must always be cold");
  return &_always_cold;
}


#ifndef PRODUCT

void WarmCallInfo::print() const {
  tty->print("%s : C=%6.1f P=%6.1f W=%6.1f S=%6.1f H=%6.1f -> %p",
             is_cold() ? "cold" : is_hot() ? "hot " : "warm",
             count(), profit(), work(), size(), compute_heat(), next());
  tty->cr();
  if (call() != NULL)  call()->dump();
}

void print_wci(WarmCallInfo* ci) {
  ci->print();
}

void WarmCallInfo::print_all() const {
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    p->print();
}

int WarmCallInfo::count_all() const {
  int cnt = 0;
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    cnt++;
  return cnt;
}

#endif //PRODUCT