/*
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "opto/valuetypenode.hpp"
#include "runtime/sharedRuntime.hpp"

// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* callee) {
  ciMethod* symbolic_info = jvms->method()->get_method_at_bci(jvms->bci());
  return symbolic_info->is_method_handle_intrinsic() && !callee->is_method_handle_intrinsic();
}

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
 private:
  bool  _is_osr;
  float _expected_uses;

 public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
  }

  virtual bool      is_parse() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
  bool is_osr() const { return _is_osr; }

};

JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  // Grab signature for matching/allocation
#ifdef ASSERT
  if (parser.tf() != (parser.depth() == 1 ?
                      C->tf() : tf())) {
    MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
    assert(C->env()->system_dictionary_modification_counter_changed(),
           "Must invalidate if TypeFuncs differ");
  }
#endif

  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
 private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool                _separate_io_proj;

 public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _separate_io_proj(separate_io_proj)
  {
  }
  virtual JVMState* generate(JVMState* jvms);

  CallStaticJavaNode* call_node() const { return _call_node; }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  PhaseGVN& gvn = kit.gvn();
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode* call = new CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
    // additional information about the method being invoked should be attached
    // to the call site to make resolution logic work
    // (see SharedRuntime::resolve_static_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later
  if (!is_static) {
    if (!kit.argument(0)->is_ValueType()) {
      // Make an explicit receiver null_check as part of this call.
      // Since we share a map with the caller, his JVMS gets adjusted.
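      // (A value type receiver can never be null, which is why the
      // explicit check is skipped in the ValueType case above.)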
      kit.null_check_receiver_before_call(method());
    }
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_intrinsic() ||
        method()->is_compiled_lambda_form()) {
      call->set_method_handle_invoke(true);
    }
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  // Check if return value is a value type pointer
  if (gvn.type(ret)->isa_valuetypeptr()) {
    // Create ValueTypeNode from the oop and replace the return value
    Node* vt = ValueTypeNode::make(gvn, kit.merged_memory(), ret);
    kit.push_node(T_VALUETYPE, vt);
  } else {
    kit.push_node(method()->return_type()->basic_type(), ret);
  }
  return kit.transfer_exceptions_into_jvms();
}

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
 private:
  int _vtable_index;

 public:
  VirtualCallGenerator(ciMethod* method, int vtable_index)
    : CallGenerator(method), _vtable_index(vtable_index)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);
  PhaseGVN& gvn = kit.gvn();
  kit.C->print_inlining_update(this);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it.  The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
    ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
    int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
    kit.inc_sp(arg_size);  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod *caller = kit.method();
  ciMethodData *caller_md = (caller == NULL) ?
                            NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
      ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
        >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode* call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call (optimized virtual or virtual)
    // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
    // about the method being invoked should be attached to the call site to
    // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
    call->set_override_symbolic_info(true);
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  // Check if return value is a value type pointer
  if (gvn.type(ret)->isa_valuetypeptr()) {
    // Create ValueTypeNode from the oop and replace the return value
    Node* vt = ValueTypeNode::make(gvn, kit.merged_memory(), ret);
    kit.push_node(T_VALUETYPE, vt);
  } else {
    kit.push_node(method()->return_type()->basic_type(), ret);
  }

  // Represent the effect of an implicit receiver null_check
  // as part of this call.  Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller.  Thus, this CallGenerator cannot be mixed with others!
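// (An OSR TypeFunc takes a single parameter, the buffer holding the
// interpreter frame state, which is why ParseGenerator::generate
// asserts depth() == 1 in the OSR case.)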
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index);
}

// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
 private:
  // unique id for log compilation
  jlong _unique_id;

 protected:
  CallGenerator* _inline_cg;
  virtual bool do_late_inline_check(JVMState* jvms) { return true; }

 public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    DirectCallGenerator(method, true), _unique_id(0), _inline_cg(inline_cg) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    // Record that this call site should be revisited once the main
    // parse is finished.
    if (!is_mh_late_inline()) {
      C->add_late_inline(this);
    }

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

  virtual void print_inlining_late(const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_assert_ready();
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
    C->print_inlining_move_to(this);
    C->print_inlining_update_delayed(this);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }
};

void LateInlineCallGenerator::do_late_inline() {
  // Can't inline it if the call node is dead or malformed.
  CallStaticJavaNode* call = call_node();
  if (call == NULL || call->outcnt() == 0 ||
      call->in(0) == NULL || call->in(0)->is_top()) {
    return;
  }

  const TypeTuple *r = call->tf()->domain_cc();
  for (int i1 = 0; i1 < method()->arg_size(); i1++) {
    if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }

  Compile* C = Compile::current();
  // Remove inlined methods from Compiler's lists.
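  // (Calls to boxing methods are flagged as macro nodes by the
  // CallStaticJavaNode constructor when boxing elimination is enabled;
  // once we commit to inlining the call here it must be taken off the
  // macro list again.)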
  if (call->is_macro()) {
    C->remove_macro_node(call);
  }

  // Make a clone of the JVMState that is appropriate to use for driving a parse
  JVMState* old_jvms = call->jvms();
  JVMState* jvms = old_jvms->clone_shallow(C);
  uint size = call->req();
  SafePointNode* map = new SafePointNode(size, jvms);
  for (uint i1 = 0; i1 < size; i1++) {
    map->init_req(i1, call->in(i1));
  }

  PhaseGVN& gvn = *C->initial_gvn();
  // Make sure the state is a MergeMem for parsing.
  if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
    Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
    gvn.set_type_bottom(mem);
    map->set_req(TypeFunc::Memory, mem);
  }

  // blow away old call arguments
  Node* top = C->top();
  for (uint i1 = TypeFunc::Parms; i1 < call->_tf->domain_cc()->cnt(); i1++) {
    map->set_req(i1, top);
  }
  jvms->set_map(map);

  // Make enough space in the expression stack to transfer
  // the incoming arguments and return value.
  map->ensure_stack(jvms, jvms->method()->max_stack());
  const TypeTuple *domain_sig = call->_tf->domain_sig();
  uint nargs = method()->arg_size();
  assert(domain_sig->cnt() - TypeFunc::Parms == nargs, "inconsistent signature");

  uint j = TypeFunc::Parms;
  for (uint i1 = 0; i1 < nargs; i1++) {
    const Type* t = domain_sig->field_at(TypeFunc::Parms + i1);
    if (!ValueTypePassFieldsAsArgs) {
      Node* arg = call->in(TypeFunc::Parms + i1);
      if (t->isa_valuetypeptr()) {
        arg = ValueTypeNode::make(gvn, map->memory(), arg);
      }
      map->set_argument(jvms, i1, arg);
    } else {
      if (t->isa_valuetypeptr() && t->is_valuetypeptr()->klass() != C->env()->___Value_klass()) {
        ciValueKlass* vk = t->is_valuetypeptr()->value_type()->value_klass();
        Node* vt = C->create_vt_node(call, vk, vk, 0, j);
        map->set_argument(jvms, i1, gvn.transform(vt));
        j += vk->value_arg_slots();
      } else {
        map->set_argument(jvms, i1, call->in(j));
        j++;
      }
    }
  }

  C->print_inlining_assert_ready();

  C->print_inlining_move_to(this);

  C->log_late_inline(this);

  // This check is done here because for_method_handle_inline() method
  // needs jvms for inlined state.
  if (!do_late_inline_check(jvms)) {
    map->disconnect_inputs(NULL, C);
    return;
  }

  // Setup default node notes to be picked up by the inlining
  Node_Notes* old_nn = C->node_notes_at(call->_idx);
  if (old_nn != NULL) {
    Node_Notes* entry_nn = old_nn->clone(C);
    entry_nn->set_jvms(jvms);
    C->set_default_node_notes(entry_nn);
  }

  // Now perform the inlining using the synthesized JVMState
  JVMState* new_jvms = _inline_cg->generate(jvms);
  if (new_jvms == NULL)  return;  // no change
  if (C->failing())      return;

  // Capture any exceptional control flow
  GraphKit kit(new_jvms);

  // Find the result object
  Node* result = C->top();
  int result_size = method()->return_type()->size();
  if (result_size != 0 && !kit.stopped()) {
    result = (result_size == 1) ?
             kit.pop() : kit.pop_pair();
  }

  C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
  C->env()->notice_inlined_method(_inline_cg->method());
  C->set_inlining_progress(true);

  if (result->is_ValueType()) {
    result = result->as_ValueType()->store_to_memory(&kit);
  }

  kit.replace_call(call, result, true);
}


CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}

class LateInlineMHCallGenerator : public LateInlineCallGenerator {
  ciMethod* _caller;
  int       _attempt;
  bool      _input_not_const;

  virtual bool do_late_inline_check(JVMState* jvms);
  virtual bool already_attempted() const { return _attempt > 0; }

 public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}

  virtual bool is_mh_late_inline() const { return true; }

  virtual JVMState* generate(JVMState* jvms) {
    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);

    Compile* C = Compile::current();
    if (_input_not_const) {
      // Inlining won't be possible, so there is no need to enqueue right now.
      call_node()->set_generator(this);
    } else {
      C->add_late_inline(this);
    }
    return new_jvms;
  }
};

bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {

  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);

  Compile::current()->print_inlining_update_delayed(this);

  if (!_input_not_const) {
    _attempt++;
  }

  if (cg != NULL && cg->is_inline()) {
    assert(!cg->is_late_inline(), "we're doing late inlining");
    _inline_cg = cg;
    Compile::current()->dec_number_of_mh_late_inlines();
    return true;
  }

  call_node()->set_generator(this);
  return false;
}

CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  Compile::current()->inc_number_of_mh_late_inlines();
  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
  return cg;
}

class LateInlineStringCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_string_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_string_late_inline() const { return true; }
};

CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineStringCallGenerator(method, inline_cg);
}

class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_boxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }
};

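// A minimal sketch of how these factory methods are meant to compose
// (hypothetical glue, not the real Compile::call_generator() logic):
// wrap a parse-based generator so the boxing call is only expanded
// after the main parse, when usage information is available.
//
//   CallGenerator* inline_cg = CallGenerator::for_inline(callee, expected_uses);
//   if (callee->is_boxing_method()) {
//     inline_cg = CallGenerator::for_boxing_late_inline(callee, inline_cg);
//   }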
CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineBoxingCallGenerator(method, inline_cg);
}

//---------------------------WarmCallGenerator--------------------------------
// Internal class which handles initial deferral of inlining decisions.
class WarmCallGenerator : public CallGenerator {
  WarmCallInfo*   _call_info;
  CallGenerator*  _if_cold;
  CallGenerator*  _if_hot;
  bool            _is_virtual;   // caches virtuality of if_cold
  bool            _is_inline;    // caches inline-ness of if_hot

 public:
  WarmCallGenerator(WarmCallInfo* ci,
                    CallGenerator* if_cold,
                    CallGenerator* if_hot)
    : CallGenerator(if_cold->method())
  {
    assert(method() == if_hot->method(), "consistent choices");
    _call_info  = ci;
    _if_cold    = if_cold;
    _if_hot     = if_hot;
    _is_virtual = if_cold->is_virtual();
    _is_inline  = if_hot->is_inline();
  }

  virtual bool      is_inline() const   { return _is_inline; }
  virtual bool      is_virtual() const  { return _is_virtual; }
  virtual bool      is_deferred() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
                                            CallGenerator* if_cold,
                                            CallGenerator* if_hot) {
  return new WarmCallGenerator(ci, if_cold, if_hot);
}

JVMState* WarmCallGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (C->log() != NULL) {
    C->log()->elem("warm_call bci='%d'", jvms->bci());
  }
  jvms = _if_cold->generate(jvms);
  if (jvms != NULL) {
    Node* m = jvms->map()->control();
    if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
    if (m->is_Catch())     m = m->in(0);  else m = C->top();
    if (m->is_Proj())      m = m->in(0);  else m = C->top();
    if (m->is_CallJava()) {
      _call_info->set_call(m->as_Call());
      _call_info->set_hot_cg(_if_hot);
#ifndef PRODUCT
      if (PrintOpto || PrintOptoInlining) {
        tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
        tty->print("WCI: ");
        _call_info->print();
      }
#endif
      _call_info->set_heat(_call_info->compute_heat());
      C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
    }
  }
  return jvms;
}

void WarmCallInfo::make_hot() {
  Unimplemented();
}

void WarmCallInfo::make_cold() {
  // No action:  Just dequeue.
}


//------------------------PredictedCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;

 public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove the extreme values from the range.
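    // (PROB_MIN/PROB_MAX are C2's canonical "almost never"/"almost
    // always" branch probabilities; clamping to them keeps the receiver
    // type check from degenerating into an always- or never-taken branch.)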
    if (hit_prob > PROB_MAX)  hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN)  hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
  }

  virtual bool      is_virtual()  const { return true; }
  virtual bool      is_inline()   const { return _if_hit->is_inline(); }
  virtual bool      is_deferred() const { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit, hit_prob);
}


JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);
  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_call bci='%d' klass='%d'",
              jvms->bci(), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver_before_call(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  // Make a copy of the replaced nodes in case we need to restore them
  ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
  replaced_nodes.clone();

  Node* exact_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = kit.type_check_receiver(receiver,
                                           _predicted_receiver, _hit_prob,
                                           &exact_receiver);

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms = NULL;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      if (kit.failing())
        return NULL;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != NULL, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // The instance does not exactly match the desired type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // fall through if the instance exactly matches the desired type
  kit.replace_in_map(receiver, exact_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // There are 2 branches and the replaced nodes are only valid on
  // one: restore the replaced nodes to what they were before the
  // branch.
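  // (Replaced nodes record type-improving substitutions made in the map;
  // a substitution derived from the hit path must not leak into the
  // merged state, hence the snapshot taken above is restored here.)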
  kit.map()->set_replaced_nodes(replaced_nodes);

  // Finish the diamond.
  kit.C->set_has_split_ifs(true);  // Has chance for split-if optimization
  RegionNode* region = new RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  // Merge memory
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool delayed_forbidden) {
  assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
  bool input_not_const;
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, input_not_const);
  Compile* C = Compile::current();
  if (cg != NULL) {
    if (!delayed_forbidden && AlwaysIncrementalInline) {
      return CallGenerator::for_late_inline(callee, cg);
    } else {
      return cg;
    }
  }
  int bci = jvms->bci();
  ciCallProfile profile = caller->call_profile_at_bci(bci);
  int call_site_count = caller->scale_count(profile.count());

  if (IncrementalInline && call_site_count > 0 &&
      (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
  } else {
    // Out-of-line call.
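    // (The MethodHandle/MemberName argument is not a compile-time
    // constant and late inlining is not worthwhile here, so fall back
    // to an ordinary out-of-line call to the intrinsic.)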
    return CallGenerator::for_direct_call(callee);
  }
}

CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
        const int vtable_index = Method::invalid_vtable_index;

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "signatures mismatch");
          return NULL;
        }

        CallGenerator* cg = C->call_generator(target, vtable_index,
                                              false /* call_does_dispatch */,
                                              jvms,
                                              true /* allow_inline */,
                                              PROB_ALWAYS);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "receiver not constant");
      }
    }
    break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      // Get MemberName argument:
      Node* member_name = kit.argument(callee->arg_size() - 1);
      if (member_name->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "signatures mismatch");
          return NULL;
        }

        // In lambda forms we erase signature types to avoid resolving issues
        // involving class loaders.  When we optimize a method handle invoke
        // to a direct call we must cast the receiver and arguments to their
        // actual types.
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          Node* arg = kit.argument(0);
          const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
          const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
          if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
            Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, sig_type));
            kit.set_argument(0, cast_obj);
          }
        }
        // Cast reference arguments to their types.
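        // (Example, assuming a target signature of (JLjava/lang/String;)V:
        // i walks the declared types {long, String} while j walks the
        // argument slots {0, 2}; only the String is a klass type and
        // receives a CheckCastPP.)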
        for (int i = 0, j = 0; i < signature->count(); i++) {
          ciType* t = signature->type_at(i);
          if (t->is_klass()) {
            Node* arg = kit.argument(receiver_skip + j);
            const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
            const Type*       sig_type = TypeOopPtr::make_from_klass(t->as_klass());
            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
              Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, sig_type));
              kit.set_argument(receiver_skip + j, cast_obj);
            }
          }
          j += t->size();  // long and double take two slots
        }

        // Try to get the most accurate receiver type
        const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
        const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
        int  vtable_index       = Method::invalid_vtable_index;
        bool call_does_dispatch = false;

        ciKlass* speculative_receiver_type = NULL;
        if (is_virtual_or_interface) {
          ciInstanceKlass* klass = target->holder();
          Node*             receiver_node = kit.argument(0);
          const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
          // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
          // optimize_virtual_call() takes 2 different holder
          // arguments for a corner case that doesn't apply here (see
          // Parse::do_call())
          target = C->optimize_virtual_call(caller, jvms->bci(), klass, klass,
                                            target, receiver_type, is_virtual,
                                            call_does_dispatch, vtable_index,  // out-parameters
                                            false /* check_access */);
          // We lack profiling at this call but type speculation may
          // provide us with a type
          speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
        }
        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
                                              true /* allow_inline */,
                                              PROB_ALWAYS,
                                              speculative_receiver_type);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "member_name not constant");
      }
    }
    break;

  default:
    fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
    break;
  }
  return NULL;
}


//------------------------PredicatedIntrinsicGenerator------------------------------
// Internal class which handles all predicated Intrinsic calls.
class PredicatedIntrinsicGenerator : public CallGenerator {
  CallGenerator* _intrinsic;
  CallGenerator* _cg;

 public:
  PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
                               CallGenerator* cg)
    : CallGenerator(cg->method())
  {
    _intrinsic = intrinsic;
    _cg        = cg;
  }

  virtual bool      is_virtual()   const { return true; }
  virtual bool      is_inlined()   const { return true; }
  virtual bool      is_intrinsic() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
                                                       CallGenerator* cg) {
  return new PredicatedIntrinsicGenerator(intrinsic, cg);
}


JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
  // The code we want to generate here is:
  //    if (receiver == NULL)
  //      uncommon_trap
  //    if (predicate(0))
  //      do_intrinsic(0)
  //    else
  //      if (predicate(1))
  //        do_intrinsic(1)
  //    ...
  //      else
  //        do_java_comp

  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicated_intrinsic bci='%d' method='%d'",
              jvms->bci(), log->identify(method()));
  }

  if (!method()->is_static()) {
    // We need an explicit receiver null_check before checking its type in predicate.
    // We share a map with the caller, so his JVMS gets adjusted.
    Node* receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      return kit.transfer_exceptions_into_jvms();
    }
  }

  int n_predicates = _intrinsic->predicates_count();
  assert(n_predicates > 0, "sanity");

  JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));

  // Region for normal compilation code if intrinsic failed.
  Node* slow_region = new RegionNode(1);

  int results = 0;
  for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
#ifdef ASSERT
    JVMState* old_jvms = kit.jvms();
    SafePointNode* old_map = kit.map();
    Node* old_io  = old_map->i_o();
    Node* old_mem = old_map->memory();
    Node* old_exc = old_map->next_exception();
#endif
    Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
#ifdef ASSERT
    // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
    assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
    SafePointNode* new_map = kit.map();
    assert(old_io  == new_map->i_o(),  "generate_predicate should not change i_o");
    assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
    assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
#endif
    if (!kit.stopped()) {
      PreserveJVMState pjvms(&kit);
      // Generate intrinsic code:
      JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
      if (new_jvms == NULL) {
        // Intrinsic failed, use normal compilation path for this predicate.
        slow_region->add_req(kit.control());
      } else {
        kit.add_exception_states_from(new_jvms);
        kit.set_jvms(new_jvms);
        if (!kit.stopped()) {
          result_jvms[results++] = kit.jvms();
        }
      }
    }
    if (else_ctrl == NULL) {
      else_ctrl = kit.C->top();
    }
    kit.set_control(else_ctrl);
  }
  if (!kit.stopped()) {
    // Final 'else' after predicates.
    slow_region->add_req(kit.control());
  }
  if (slow_region->req() > 1) {
    PreserveJVMState pjvms(&kit);
    // Generate normal compilation code:
    kit.set_control(gvn.transform(slow_region));
    JVMState* new_jvms = _cg->generate(kit.sync_jvms());
    if (kit.failing())
      return NULL;  // might happen because of NodeCountInliningCutoff
    assert(new_jvms != NULL, "must be");
    kit.add_exception_states_from(new_jvms);
    kit.set_jvms(new_jvms);
    if (!kit.stopped()) {
      result_jvms[results++] = kit.jvms();
    }
  }

  if (results == 0) {
    // All paths ended in uncommon traps.
    (void) kit.stop();
    return kit.transfer_exceptions_into_jvms();
  }

  if (results == 1) {  // Only one path
    kit.set_jvms(result_jvms[0]);
    return kit.transfer_exceptions_into_jvms();
  }

  // Merge all paths.
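  // (Each surviving path contributes one edge to a fresh Region; the
  // first path supplies the base JVMS, and the remaining paths are
  // merged into it through the control, i_o and memory Phis below.)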
  kit.C->set_has_split_ifs(true);  // Has chance for split-if optimization
  RegionNode* region = new RegionNode(results + 1);
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  for (int i = 0; i < results; i++) {
    JVMState* jvms = result_jvms[i];
    int path = i + 1;
    SafePointNode* map = jvms->map();
    region->init_req(path, map->control());
    iophi->set_req(path, map->i_o());
    if (i == 0) {
      kit.set_jvms(jvms);
    } else {
      kit.merge_memory(map->merged_memory(), region, path);
    }
  }
  kit.set_control(gvn.transform(region));
  kit.set_i_o(gvn.transform(iophi));
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }

  // Merge debug info.
  Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  Node* map = kit.map();
  uint limit = map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* n = map->in(i);
    ins[0] = n;
    const Type* t = gvn.type(n);
    bool needs_phi = false;
    for (int j = 1; j < results; j++) {
      JVMState* jvms = result_jvms[j];
      Node* jmap = jvms->map();
      Node* m = NULL;
      if (jmap->req() > i) {
        m = jmap->in(i);
        if (m != n) {
          needs_phi = true;
          t = t->meet_speculative(gvn.type(m));
        }
      }
      ins[j] = m;
    }
    if (needs_phi) {
      Node* phi = PhiNode::make(region, n, t);
      for (int j = 1; j < results; j++) {
        phi->set_req(j + 1, ins[j]);
      }
      map->set_req(i, gvn.transform(phi));
    }
  }

  return kit.transfer_exceptions_into_jvms();
}

//-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which replaces the call with an unconditional uncommon trap.
class UncommonTrapCallGenerator : public CallGenerator {
  Deoptimization::DeoptReason _reason;
  Deoptimization::DeoptAction _action;

 public:
  UncommonTrapCallGenerator(ciMethod* m,
                            Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action)
    : CallGenerator(m)
  {
    _reason = reason;
    _action = action;
  }

  virtual bool      is_virtual() const { ShouldNotReachHere(); return false; }
  virtual bool      is_trap() const    { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator*
CallGenerator::for_uncommon_trap(ciMethod* m,
                                 Deoptimization::DeoptReason reason,
                                 Deoptimization::DeoptAction action) {
  return new UncommonTrapCallGenerator(m, reason, action);
}


JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
  // Callsite signature can be different from actual method being called (i.e. _linkTo* sites).
  // Use callsite signature always.
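  // (At _linkTo* sites the declared method is the intrinsic itself, so
  // its arg_size includes the trailing MemberName appendix that the
  // actual target method does not have; using the callsite signature
  // keeps the deopt state consistent with the interpreter's view.)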
  ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
  int nargs = declared_method->arg_size();
  kit.inc_sp(nargs);
  assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
  if (_reason == Deoptimization::Reason_class_check &&
      _action == Deoptimization::Action_maybe_recompile) {
    // Temp fix for 6529811
    // Don't allow uncommon_trap to override our decision to recompile in the event
    // of a class cast failure for a monomorphic call as it will never let us convert
    // the call to either bi-morphic or megamorphic and can lead to unc-trap loops
    bool keep_exact_action = true;
    kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
  } else {
    kit.uncommon_trap(_reason, _action);
  }
  return kit.transfer_exceptions_into_jvms();
}

// (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)

// (Note:  Merged hook_up_exits into ParseGenerator::generate.)

#define NODES_OVERHEAD_PER_METHOD (30.0)
#define NODES_PER_BYTECODE (9.5)

void WarmCallInfo::init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor) {
  int call_count = profile.count();
  int code_size = call_method->code_size();

  // Expected execution count is based on the historical count:
  _count = call_count < 0 ? 1 : call_site->method()->scale_count(call_count, prof_factor);

  // Expected profit from inlining, in units of simple call-overheads.
  _profit = 1.0;

  // Expected work performed by the call in units of call-overheads.
  // %%% need an empirical curve fit for "work" (time in call)
  float bytecodes_per_call = 3;
  _work = 1.0 + code_size / bytecodes_per_call;

  // Expected size of compilation graph:
  // -XX:+PrintParseStatistics once reported:
  //   Methods seen: 9184  Methods parsed: 9184  Nodes created: 1582391
  //   Histogram of 144298 parsed bytecodes:
  // %%% Need a better predictor for graph size.
  _size = NODES_OVERHEAD_PER_METHOD + (NODES_PER_BYTECODE * code_size);
}

// is_cold:  Return true if the node should never be inlined.
// This is true if any of the key metrics are extreme.
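// (Note the asymmetry with is_hot below: here count/profit are compared
// against lower bounds and work/size against upper bounds, so a call is
// cold when any metric is extreme in the unfavorable direction.)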
bool WarmCallInfo::is_cold() const {
  if (count()  <  WarmCallMinCount)        return true;
  if (profit() <  WarmCallMinProfit)       return true;
  if (work()   >  WarmCallMaxWork)         return true;
  if (size()   >  WarmCallMaxSize)         return true;
  return false;
}

// is_hot:  Return true if the node should be inlined immediately.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_hot() const {
  assert(!is_cold(), "eliminate is_cold cases before testing is_hot");
  if (count()  >= HotCallCountThreshold)   return true;
  if (profit() >= HotCallProfitThreshold)  return true;
  if (work()   <= HotCallTrivialWork)      return true;
  if (size()   <= HotCallTrivialSize)      return true;
  return false;
}

// compute_heat:
float WarmCallInfo::compute_heat() const {
  assert(!is_cold(), "compute heat only on warm nodes");
  assert(!is_hot(),  "compute heat only on warm nodes");
  int min_size = MAX2(0,   (int)HotCallTrivialSize);
  int max_size = MIN2(500, (int)WarmCallMaxSize);
  float method_size = (size() - min_size) / MAX2(1, max_size - min_size);
  float size_factor;
  if      (method_size < 0.05)  size_factor = 4;    // 2 sigmas better than avg.
  else if (method_size < 0.15)  size_factor = 2;    // 1 sigma better than avg.
  else if (method_size < 0.5)   size_factor = 1;    // better than avg.
  else                          size_factor = 0.5;  // worse than avg.
  return (count() * profit() * size_factor);
}

bool WarmCallInfo::warmer_than(WarmCallInfo* that) {
  assert(this != that, "compare only different WCIs");
  assert(this->heat() != 0 && that->heat() != 0, "call compute_heat 1st");
  if (this->heat() > that->heat())   return true;
  if (this->heat() < that->heat())   return false;
  assert(this->heat() == that->heat(), "no NaN heat allowed");
  // Equal heat.  Break the tie some other way.
  if (!this->call() || !that->call())  return (address)this > (address)that;
  return this->call()->_idx > that->call()->_idx;
}

//#define UNINIT_NEXT ((WarmCallInfo*)badAddress)
#define UNINIT_NEXT ((WarmCallInfo*)NULL)

WarmCallInfo* WarmCallInfo::insert_into(WarmCallInfo* head) {
  assert(next() == UNINIT_NEXT, "not yet on any list");
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != NULL && next_p->warmer_than(this)) {
    prev_p = next_p;
    next_p = prev_p->next();
  }
  // Install this between prev_p and next_p.
  this->set_next(next_p);
  if (prev_p == NULL)
    head = this;
  else
    prev_p->set_next(this);
  return head;
}

WarmCallInfo* WarmCallInfo::remove_from(WarmCallInfo* head) {
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != this) {
    assert(next_p != NULL, "this must be in the list somewhere");
    prev_p = next_p;
    next_p = prev_p->next();
  }
  next_p = this->next();
  debug_only(this->set_next(UNINIT_NEXT));
  // Remove this from between prev_p and next_p.
  if (prev_p == NULL)
    head = next_p;
  else
    prev_p->set_next(next_p);
  return head;
}

WarmCallInfo WarmCallInfo::_always_hot(WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE(),
                                       WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE());
WarmCallInfo WarmCallInfo::_always_cold(WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE(),
                                        WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE());

WarmCallInfo* WarmCallInfo::always_hot() {
  assert(_always_hot.is_hot(), "must always be hot");
  return &_always_hot;
}

WarmCallInfo* WarmCallInfo::always_cold() {
  assert(_always_cold.is_cold(), "must always be cold");
  return &_always_cold;
}


#ifndef PRODUCT

void WarmCallInfo::print() const {
  tty->print("%s : C=%6.1f P=%6.1f W=%6.1f S=%6.1f H=%6.1f -> %p",
             is_cold() ? "cold" : is_hot() ? "hot " : "warm",
             count(), profit(), work(), size(), compute_heat(), next());
  tty->cr();
  if (call() != NULL)  call()->dump();
}

void print_wci(WarmCallInfo* ci) {
  ci->print();
}

void WarmCallInfo::print_all() const {
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    p->print();
}

int WarmCallInfo::count_all() const {
  int cnt = 0;
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    cnt++;
  return cnt;
}

#endif //PRODUCT