/*
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/sharedRuntime.hpp"

// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
 private:
  bool  _is_osr;
  float _expected_uses;

 public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
  }

  virtual bool      is_parse() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
  bool is_osr() { return _is_osr; }
};

JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  // Grab signature for matching/allocation
#ifdef ASSERT
  if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
    MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
    assert(C->env()->system_dictionary_modification_counter_changed(),
           "Must invalidate if TypeFuncs differ");
  }
#endif

  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
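  // (transfer_exceptions_into_jvms() folds the exception states collected
  // during parsing into the returned JVMState, so the caller sees both the
  // normal exit and the exceptional exits of the inlined body.)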
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
 private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool                _separate_io_proj;

 public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _call_node(NULL),
      _separate_io_proj(separate_io_proj)
  {
  }
  virtual JVMState* generate(JVMState* jvms);

  CallStaticJavaNode* call_node() const { return _call_node; }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode *call = new CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
  call->set_override_symbolic_info(override_symbolic_info());
  _call_node = call;  // Save the call node in case we need it later
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_intrinsic() ||
        method()->is_compiled_lambda_form()) {
      call->set_method_handle_invoke(true);
    }
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
 private:
  int _vtable_index;
 public:
  VirtualCallGenerator(ciMethod* method, int vtable_index)
    : CallGenerator(method), _vtable_index(vtable_index)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);

  kit.C->print_inlining_update(this);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it.  The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
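  // Instead, emit an uncommon trap below; the arguments are pushed back
  // onto the expression stack first (inc_sp) so that the deoptimized frame
  // sees the full interpreter state of the invoke bytecode.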
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
    ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
    int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
    kit.inc_sp(arg_size);  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod *caller = kit.method();
  ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
      ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
        >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode *call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
  call->set_override_symbolic_info(override_symbolic_info());
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call.  Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  return new ParseGenerator(m, expected_uses);
}
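// Note: check_can_parse() returns NULL when the bytecodes are available and
// parseable, otherwise a string naming the obstacle.  An illustrative use,
// mirroring what Compile::call_generator() does (a sketch, not actual code):
//   CallGenerator* cg = CallGenerator::for_inline(callee, expected_uses);
//   if (cg == NULL)  cg = CallGenerator::for_direct_call(callee);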
// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller.  Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index);
}

// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
 private:
  // unique id for log compilation
  jlong _unique_id;

 protected:
  CallGenerator* _inline_cg;
  virtual bool do_late_inline_check(JVMState* jvms) { return true; }

 public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    DirectCallGenerator(method, true), _unique_id(0), _inline_cg(inline_cg) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    // Record that this call site should be revisited once the main
    // parse is finished.
    if (!is_mh_late_inline()) {
      C->add_late_inline(this);
    }

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

  virtual void print_inlining_late(const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_assert_ready();
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
    C->print_inlining_move_to(this);
    C->print_inlining_update_delayed(this);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }
};

void LateInlineCallGenerator::do_late_inline() {
  // Can't inline it
  CallStaticJavaNode* call = call_node();
  if (call == NULL || call->outcnt() == 0 ||
      call->in(0) == NULL || call->in(0)->is_top()) {
    return;
  }

  const TypeTuple *r = call->tf()->domain();
  for (int i1 = 0; i1 < method()->arg_size(); i1++) {
    if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }

  Compile* C = Compile::current();
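  // From here on, replay the call as if it were about to be parsed for the
  // first time: clone the JVMState, rebuild a SafePointNode map from the
  // call's inputs, re-push the arguments, and hand the synthesized state to
  // the saved inline CallGenerator.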
  // Remove inlined methods from Compiler's lists.
  if (call->is_macro()) {
    C->remove_macro_node(call);
  }

  // Make a clone of the JVMState that is appropriate to use for driving a parse
  JVMState* old_jvms = call->jvms();
  JVMState* jvms = old_jvms->clone_shallow(C);
  uint size = call->req();
  SafePointNode* map = new SafePointNode(size, jvms);
  for (uint i1 = 0; i1 < size; i1++) {
    map->init_req(i1, call->in(i1));
  }

  // Make sure the state is a MergeMem for parsing.
  if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
    Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
    C->initial_gvn()->set_type_bottom(mem);
    map->set_req(TypeFunc::Memory, mem);
  }

  uint nargs = method()->arg_size();
  // blow away old call arguments
  Node* top = C->top();
  for (uint i1 = 0; i1 < nargs; i1++) {
    map->set_req(TypeFunc::Parms + i1, top);
  }
  jvms->set_map(map);

  // Make enough space in the expression stack to transfer
  // the incoming arguments and return value.
  map->ensure_stack(jvms, jvms->method()->max_stack());
  for (uint i1 = 0; i1 < nargs; i1++) {
    map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
  }

  C->print_inlining_assert_ready();

  C->print_inlining_move_to(this);

  C->log_late_inline(this);

  // This check is done here because for_method_handle_inline() method
  // needs jvms for inlined state.
  if (!do_late_inline_check(jvms)) {
    map->disconnect_inputs(NULL, C);
    return;
  }

  // Setup default node notes to be picked up by the inlining
  Node_Notes* old_nn = C->node_notes_at(call->_idx);
  if (old_nn != NULL) {
    Node_Notes* entry_nn = old_nn->clone(C);
    entry_nn->set_jvms(jvms);
    C->set_default_node_notes(entry_nn);
  }

  // Now perform the inlining using the synthesized JVMState
  JVMState* new_jvms = _inline_cg->generate(jvms);
  if (new_jvms == NULL)  return;  // no change
  if (C->failing())      return;

  // Capture any exceptional control flow
  GraphKit kit(new_jvms);

  // Find the result object
  Node* result = C->top();
  int   result_size = method()->return_type()->size();
  if (result_size != 0 && !kit.stopped()) {
    result = (result_size == 1) ? kit.pop() : kit.pop_pair();
  }

  C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
  C->env()->notice_inlined_method(_inline_cg->method());
  C->set_inlining_progress(true);

  kit.replace_call(call, result, true);
}


CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}

class LateInlineMHCallGenerator : public LateInlineCallGenerator {
  ciMethod* _caller;
  int       _attempt;
  bool      _input_not_const;

  virtual bool do_late_inline_check(JVMState* jvms);
  virtual bool already_attempted() const { return _attempt > 0; }

 public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}

  virtual bool is_mh_late_inline() const { return true; }

  virtual JVMState* generate(JVMState* jvms) {
    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);

    Compile* C = Compile::current();
    if (_input_not_const) {
      // inlining won't be possible so no need to enqueue right now.
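      // The MethodHandle/MemberName argument is not constant yet; it may
      // become constant as enclosing code is incrementally inlined, so the
      // call node just remembers this generator for a later retry.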
      call_node()->set_generator(this);
    } else {
      C->add_late_inline(this);
    }
    return new_jvms;
  }
};

bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {

  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);

  Compile::current()->print_inlining_update_delayed(this);

  if (!_input_not_const) {
    _attempt++;
  }

  if (cg != NULL && cg->is_inline()) {
    assert(!cg->is_late_inline(), "we're doing late inlining");
    _inline_cg = cg;
    Compile::current()->dec_number_of_mh_late_inlines();
    return true;
  }

  call_node()->set_generator(this);
  return false;
}

CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  Compile::current()->inc_number_of_mh_late_inlines();
  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
  return cg;
}

class LateInlineStringCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_string_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_string_late_inline() const { return true; }
};

CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineStringCallGenerator(method, inline_cg);
}

class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_boxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }
};

CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineBoxingCallGenerator(method, inline_cg);
}
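// Note: the string and boxing variants above differ only in which worklist
// they register on; Compile drains those lists at dedicated points in the
// optimization pipeline, while plain late inlines are handled during
// incremental inlining.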
//---------------------------WarmCallGenerator--------------------------------
// Internal class which handles initial deferral of inlining decisions.
class WarmCallGenerator : public CallGenerator {
  WarmCallInfo*  _call_info;
  CallGenerator* _if_cold;
  CallGenerator* _if_hot;
  bool           _is_virtual;  // caches virtuality of if_cold
  bool           _is_inline;   // caches inline-ness of if_hot

 public:
  WarmCallGenerator(WarmCallInfo* ci,
                    CallGenerator* if_cold,
                    CallGenerator* if_hot)
    : CallGenerator(if_cold->method())
  {
    assert(method() == if_hot->method(), "consistent choices");
    _call_info  = ci;
    _if_cold    = if_cold;
    _if_hot     = if_hot;
    _is_virtual = if_cold->is_virtual();
    _is_inline  = if_hot->is_inline();
  }

  virtual bool      is_inline() const   { return _is_inline; }
  virtual bool      is_virtual() const  { return _is_virtual; }
  virtual bool      is_deferred() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
                                            CallGenerator* if_cold,
                                            CallGenerator* if_hot) {
  return new WarmCallGenerator(ci, if_cold, if_hot);
}

JVMState* WarmCallGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (C->log() != NULL) {
    C->log()->elem("warm_call bci='%d'", jvms->bci());
  }
  jvms = _if_cold->generate(jvms);
  if (jvms != NULL) {
    Node* m = jvms->map()->control();
    if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
    if (m->is_Catch())     m = m->in(0);  else m = C->top();
    if (m->is_Proj())      m = m->in(0);  else m = C->top();
    if (m->is_CallJava()) {
      _call_info->set_call(m->as_Call());
      _call_info->set_hot_cg(_if_hot);
#ifndef PRODUCT
      if (PrintOpto || PrintOptoInlining) {
        tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
        tty->print("WCI: ");
        _call_info->print();
      }
#endif
      _call_info->set_heat(_call_info->compute_heat());
      C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
    }
  }
  return jvms;
}

void WarmCallInfo::make_hot() {
  Unimplemented();
}

void WarmCallInfo::make_cold() {
  // No action:  Just dequeue.
}


//------------------------PredictedCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;

 public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove the extreme values from the range.
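    // Clamping to [PROB_MIN, PROB_MAX] keeps the type-check branch from
    // being treated as always or never taken downstream.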
    if (hit_prob > PROB_MAX)  hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN)  hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
  }

  virtual bool      is_virtual() const  { return true; }
  virtual bool      is_inline() const   { return _if_hit->is_inline(); }
  virtual bool      is_deferred() const { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit, hit_prob);
}


JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);
  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_call bci='%d' klass='%d'",
              jvms->bci(), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver_before_call(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  // Make a copy of the replaced nodes in case we need to restore them
  ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
  replaced_nodes.clone();

  Node* exact_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = kit.type_check_receiver(receiver,
                                           _predicted_receiver, _hit_prob,
                                           &exact_receiver);

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      if (kit.failing())
        return NULL;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != NULL, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // Instance does not exactly match the desired type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // fall through if the instance exactly matches the desired type
  kit.replace_in_map(receiver, exact_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // There are 2 branches and the replaced nodes are only valid on
  // one: restore the replaced nodes to what they were before the
  // branch.
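  // (GraphKit tracks nodes it has improved in the map's ReplacedNodes list
  // so those improvements can be propagated to callers; the clone taken
  // above preserves the state from before the type check split control.)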
  kit.map()->set_replaced_nodes(replaced_nodes);

  // Finish the diamond.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  // Merge memory
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool delayed_forbidden) {
  assert(callee->is_method_handle_intrinsic() ||
         callee->is_compiled_lambda_form(), "for_method_handle_call mismatch");
  bool input_not_const;
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, input_not_const);
  Compile* C = Compile::current();
  if (cg != NULL) {
    if (!delayed_forbidden && AlwaysIncrementalInline) {
      return CallGenerator::for_late_inline(callee, cg);
    } else {
      return cg;
    }
  }
  int bci = jvms->bci();
  ciCallProfile profile = caller->call_profile_at_bci(bci);
  int call_site_count = caller->scale_count(profile.count());

  if (IncrementalInline && call_site_count > 0 &&
      (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
  } else {
    // Out-of-line call.
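    // None of the inlining strategies applied above, so emit an ordinary
    // out-of-line call to the method handle intrinsic.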
    return CallGenerator::for_direct_call(callee);
  }
}

CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
        guarantee(!target->is_method_handle_intrinsic(), "should not happen");  // XXX remove
        const int vtable_index = Method::invalid_vtable_index;
        CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS, NULL, true, true);
        assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
        if (cg != NULL) {
          if (!cg->is_inline()) {
            // To be able to issue a static call (and skip a call to MH.invokeBasic adapter),
            // additional information about the method being invoked should be attached
            // to the call site to make resolution logic work
            // (see SharedRuntime::resolve_static_call_C).
            cg->set_override_symbolic_info(true);
          }
          return cg;
        }
      } else {
        const char* msg = "receiver not constant";
        if (PrintInlining)  C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
        C->log_inline_failure(msg);
      }
    }
    break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      // Get MemberName argument:
      Node* member_name = kit.argument(callee->arg_size() - 1);
      if (member_name->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        // In lambda forms we erase signature types to avoid resolving issues
        // involving class loaders.  When we optimize a method handle invoke
        // to a direct call we must cast the receiver and arguments to their
        // actual types.
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          Node* arg = kit.argument(0);
          const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
          const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
          if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
            Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, sig_type));
            kit.set_argument(0, cast_obj);
          }
        }
        // Cast reference arguments to their types.
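        // Note: signature->count() covers only the declared parameters; the
        // receiver (if any) was handled above, hence receiver_skip.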
        for (int i = 0; i < signature->count(); i++) {
          ciType* t = signature->type_at(i);
          if (t->is_klass()) {
            Node* arg = kit.argument(receiver_skip + i);
            const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
            const Type*       sig_type = TypeOopPtr::make_from_klass(t->as_klass());
            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
              Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, sig_type));
              kit.set_argument(receiver_skip + i, cast_obj);
            }
          }
        }

        // Try to get the most accurate receiver type
        const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
        const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
        int  vtable_index       = Method::invalid_vtable_index;
        bool call_does_dispatch = false;

        ciKlass* speculative_receiver_type = NULL;
        if (is_virtual_or_interface) {
          ciInstanceKlass* klass = target->holder();
          Node*             receiver_node = kit.argument(0);
          const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
          // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
          // optimize_virtual_call() takes 2 different holder
          // arguments for a corner case that doesn't apply here (see
          // Parse::do_call())
          target = C->optimize_virtual_call(caller, jvms->bci(), klass, klass,
                                            target, receiver_type, is_virtual,
                                            call_does_dispatch, vtable_index,  // out-parameters
                                            /*check_access=*/false);
          // We lack profiling at this call but type speculation may
          // provide us with a type
          speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
        }
        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, /*allow_inline=*/true, PROB_ALWAYS, speculative_receiver_type, true, true);
        assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
        if (cg != NULL) {
          if (!cg->is_inline()) {
            // To be able to issue a direct call (static, optimized virtual, or virtual)
            // and skip a call to MH.linkTo* adapter, additional information about the method
            // being invoked should be attached to the call site to make resolution logic work
            // (see SharedRuntime::resolve_{static,virtual,opt_virtual}_call_C).
            cg->set_override_symbolic_info(true);
          }
          return cg;
        }
      } else {
        const char* msg = "member_name not constant";
        if (PrintInlining)  C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
        C->log_inline_failure(msg);
      }
    }
    break;

  default:
    fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
    break;
  }
  return NULL;
}
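// Note: predicated intrinsics serve intrinsics that only handle a
// runtime-checked subset of their inputs (the block crypto intrinsics are
// one example); each predicate guards one intrinsic version, and compiled
// Java code serves as the final fallback.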
//------------------------PredicatedIntrinsicGenerator------------------------
// Internal class which handles all predicated Intrinsic calls.
class PredicatedIntrinsicGenerator : public CallGenerator {
  CallGenerator* _intrinsic;
  CallGenerator* _cg;

 public:
  PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
                               CallGenerator* cg)
    : CallGenerator(cg->method())
  {
    _intrinsic = intrinsic;
    _cg        = cg;
  }

  virtual bool      is_virtual() const   { return true; }
  virtual bool      is_inlined() const   { return true; }
  virtual bool      is_intrinsic() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
                                                       CallGenerator* cg) {
  return new PredicatedIntrinsicGenerator(intrinsic, cg);
}


JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
  // The code we want to generate here is:
  //    if (receiver == NULL)
  //      uncommon_trap
  //    if (predicate(0))
  //      do_intrinsic(0)
  //    else
  //      if (predicate(1))
  //        do_intrinsic(1)
  //      ...
  //      else
  //        do_java_comp

  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicated_intrinsic bci='%d' method='%d'",
              jvms->bci(), log->identify(method()));
  }

  if (!method()->is_static()) {
    // We need an explicit receiver null_check before checking its type in predicate.
    // We share a map with the caller, so his JVMS gets adjusted.
    Node* receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      return kit.transfer_exceptions_into_jvms();
    }
  }

  int n_predicates = _intrinsic->predicates_count();
  assert(n_predicates > 0, "sanity");

  JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));

  // Region for normal compilation code if intrinsic failed.
  Node* slow_region = new RegionNode(1);

  int results = 0;
  for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
#ifdef ASSERT
    JVMState* old_jvms = kit.jvms();
    SafePointNode* old_map = kit.map();
    Node* old_io  = old_map->i_o();
    Node* old_mem = old_map->memory();
    Node* old_exc = old_map->next_exception();
#endif
    Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
#ifdef ASSERT
    // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
    assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
    SafePointNode* new_map = kit.map();
    assert(old_io  == new_map->i_o(),    "generate_predicate should not change i_o");
    assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
    assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
#endif
    if (!kit.stopped()) {
      PreserveJVMState pjvms(&kit);
      // Generate intrinsic code:
      JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
      if (new_jvms == NULL) {
        // Intrinsic failed, use normal compilation path for this predicate.
        slow_region->add_req(kit.control());
      } else {
        kit.add_exception_states_from(new_jvms);
        kit.set_jvms(new_jvms);
        if (!kit.stopped()) {
          result_jvms[results++] = kit.jvms();
        }
      }
    }
    if (else_ctrl == NULL) {
      else_ctrl = kit.C->top();
    }
    kit.set_control(else_ctrl);
  }
  if (!kit.stopped()) {
    // Final 'else' after predicates.
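    // Control reaches here only if every predicate evaluated to false,
    // so this path also falls back to normal compilation.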
    slow_region->add_req(kit.control());
  }
  if (slow_region->req() > 1) {
    PreserveJVMState pjvms(&kit);
    // Generate normal compilation code:
    kit.set_control(gvn.transform(slow_region));
    JVMState* new_jvms = _cg->generate(kit.sync_jvms());
    if (kit.failing())
      return NULL;  // might happen because of NodeCountInliningCutoff
    assert(new_jvms != NULL, "must be");
    kit.add_exception_states_from(new_jvms);
    kit.set_jvms(new_jvms);
    if (!kit.stopped()) {
      result_jvms[results++] = kit.jvms();
    }
  }

  if (results == 0) {
    // All paths ended in uncommon traps.
    (void) kit.stop();
    return kit.transfer_exceptions_into_jvms();
  }

  if (results == 1) { // Only one path
    kit.set_jvms(result_jvms[0]);
    return kit.transfer_exceptions_into_jvms();
  }

  // Merge all paths.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(results + 1);
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  for (int i = 0; i < results; i++) {
    JVMState* jvms = result_jvms[i];
    int path = i + 1;
    SafePointNode* map = jvms->map();
    region->init_req(path, map->control());
    iophi->set_req(path, map->i_o());
    if (i == 0) {
      kit.set_jvms(jvms);
    } else {
      kit.merge_memory(map->merged_memory(), region, path);
    }
  }
  kit.set_control(gvn.transform(region));
  kit.set_i_o(gvn.transform(iophi));
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }

  // Merge debug info.
  Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  Node* map = kit.map();
  uint limit = map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* n = map->in(i);
    ins[0] = n;
    const Type* t = gvn.type(n);
    bool needs_phi = false;
    for (int j = 1; j < results; j++) {
      JVMState* jvms = result_jvms[j];
      Node* jmap = jvms->map();
      Node* m = NULL;
      if (jmap->req() > i) {
        m = jmap->in(i);
        if (m != n) {
          needs_phi = true;
          t = t->meet_speculative(gvn.type(m));
        }
      }
      ins[j] = m;
    }
    if (needs_phi) {
      Node* phi = PhiNode::make(region, n, t);
      for (int j = 1; j < results; j++) {
        phi->set_req(j + 1, ins[j]);
      }
      map->set_req(i, gvn.transform(phi));
    }
  }

  return kit.transfer_exceptions_into_jvms();
}
//-------------------------UncommonTrapCallGenerator--------------------------
// Internal class which handles call sites known to always deoptimize:
// it generates an uncommon trap in place of the call.
class UncommonTrapCallGenerator : public CallGenerator {
  Deoptimization::DeoptReason _reason;
  Deoptimization::DeoptAction _action;

 public:
  UncommonTrapCallGenerator(ciMethod* m,
                            Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action)
    : CallGenerator(m)
  {
    _reason = reason;
    _action = action;
  }

  virtual bool      is_virtual() const { ShouldNotReachHere(); return false; }
  virtual bool      is_trap() const    { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator*
CallGenerator::for_uncommon_trap(ciMethod* m,
                                 Deoptimization::DeoptReason reason,
                                 Deoptimization::DeoptAction action) {
  return new UncommonTrapCallGenerator(m, reason, action);
}


JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
  int nargs = method()->arg_size();
  kit.inc_sp(nargs);
  assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
  if (_reason == Deoptimization::Reason_class_check &&
      _action == Deoptimization::Action_maybe_recompile) {
    // Temp fix for 6529811
    // Don't allow uncommon_trap to override our decision to recompile in the event
    // of a class cast failure for a monomorphic call as it will never let us convert
    // the call to either bi-morphic or megamorphic and can lead to unc-trap loops
    bool keep_exact_action = true;
    kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
  } else {
    kit.uncommon_trap(_reason, _action);
  }
  return kit.transfer_exceptions_into_jvms();
}

// (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)

// (Note:  Merged hook_up_exits into ParseGenerator::generate.)

#define NODES_OVERHEAD_PER_METHOD (30.0)
#define NODES_PER_BYTECODE (9.5)

void WarmCallInfo::init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor) {
  int call_count = profile.count();
  int code_size = call_method->code_size();

  // Expected execution count is based on the historical count:
  _count = call_count < 0 ? 1 : call_site->method()->scale_count(call_count, prof_factor);

  // Expected profit from inlining, in units of simple call-overheads.
  _profit = 1.0;

  // Expected work performed by the call in units of call-overheads.
  // %%% need an empirical curve fit for "work" (time in call)
  float bytecodes_per_call = 3;
  _work = 1.0 + code_size / bytecodes_per_call;

  // Expected size of compilation graph:
  // -XX:+PrintParseStatistics once reported:
  //  Methods seen: 9184  Methods parsed: 9184  Nodes created: 1582391
  //  Histogram of 144298 parsed bytecodes:
  // %%% Need a better predictor for graph size.
  _size = NODES_OVERHEAD_PER_METHOD + (NODES_PER_BYTECODE * code_size);
}
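// For example, the formula above estimates a 100-bytecode method at
// 30.0 + 9.5 * 100 = 980 graph nodes.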
// is_cold:  Return true if the node should never be inlined.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_cold() const {
  if (count()  <  WarmCallMinCount)   return true;
  if (profit() <  WarmCallMinProfit)  return true;
  if (work()   >  WarmCallMaxWork)    return true;
  if (size()   >  WarmCallMaxSize)    return true;
  return false;
}

// is_hot:  Return true if the node should be inlined immediately.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_hot() const {
  assert(!is_cold(), "eliminate is_cold cases before testing is_hot");
  if (count()  >= HotCallCountThreshold)   return true;
  if (profit() >= HotCallProfitThreshold)  return true;
  if (work()   <= HotCallTrivialWork)      return true;
  if (size()   <= HotCallTrivialSize)      return true;
  return false;
}

// compute_heat:
float WarmCallInfo::compute_heat() const {
  assert(!is_cold(), "compute heat only on warm nodes");
  assert(!is_hot(),  "compute heat only on warm nodes");
  int min_size = MAX2(0,   (int)HotCallTrivialSize);
  int max_size = MIN2(500, (int)WarmCallMaxSize);
  float method_size = (size() - min_size) / MAX2(1, max_size - min_size);
  float size_factor;
  if      (method_size < 0.05)  size_factor = 4;    // 2 sigmas better than avg.
  else if (method_size < 0.15)  size_factor = 2;    // 1 sigma better than avg.
  else if (method_size < 0.5)   size_factor = 1;    // better than avg.
  else                          size_factor = 0.5;  // worse than avg.
  return (count() * profit() * size_factor);
}

bool WarmCallInfo::warmer_than(WarmCallInfo* that) {
  assert(this != that, "compare only different WCIs");
  assert(this->heat() != 0 && that->heat() != 0, "call compute_heat 1st");
  if (this->heat() > that->heat())  return true;
  if (this->heat() < that->heat())  return false;
  assert(this->heat() == that->heat(), "no NaN heat allowed");
  // Equal heat.  Break the tie some other way.
  if (!this->call() || !that->call())  return (address)this > (address)that;
  return this->call()->_idx > that->call()->_idx;
}

//#define UNINIT_NEXT ((WarmCallInfo*)badAddress)
#define UNINIT_NEXT ((WarmCallInfo*)NULL)

WarmCallInfo* WarmCallInfo::insert_into(WarmCallInfo* head) {
  assert(next() == UNINIT_NEXT, "not yet on any list");
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != NULL && next_p->warmer_than(this)) {
    prev_p = next_p;
    next_p = prev_p->next();
  }
  // Install this between prev_p and next_p.
  this->set_next(next_p);
  if (prev_p == NULL)
    head = this;
  else
    prev_p->set_next(this);
  return head;
}

WarmCallInfo* WarmCallInfo::remove_from(WarmCallInfo* head) {
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != this) {
    assert(next_p != NULL, "this must be in the list somewhere");
    prev_p = next_p;
    next_p = prev_p->next();
  }
  next_p = this->next();
  debug_only(this->set_next(UNINIT_NEXT));
  // Remove this from between prev_p and next_p.
  if (prev_p == NULL)
    head = next_p;
  else
    prev_p->set_next(next_p);
  return head;
}

WarmCallInfo WarmCallInfo::_always_hot(WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE(),
                                       WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE());
WarmCallInfo WarmCallInfo::_always_cold(WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE(),
                                        WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE());

WarmCallInfo* WarmCallInfo::always_hot() {
  assert(_always_hot.is_hot(), "must always be hot");
  return &_always_hot;
}

WarmCallInfo* WarmCallInfo::always_cold() {
  assert(_always_cold.is_cold(), "must always be cold");
  return &_always_cold;
}


#ifndef PRODUCT

void WarmCallInfo::print() const {
  tty->print("%s : C=%6.1f P=%6.1f W=%6.1f S=%6.1f H=%6.1f -> %p",
             is_cold() ? "cold" : is_hot() ? "hot " : "warm",
             count(), profit(), work(), size(), compute_heat(), next());
  tty->cr();
  if (call() != NULL)  call()->dump();
}

void print_wci(WarmCallInfo* ci) {
  ci->print();
}

void WarmCallInfo::print_all() const {
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    p->print();
}

int WarmCallInfo::count_all() const {
  int cnt = 0;
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    cnt++;
  return cnt;
}

#endif //PRODUCT