/*
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/sharedRuntime.hpp"

// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

bool CallGenerator::is_inlined_mh_linker(JVMState* jvms, ciMethod* callee) {
  ciMethod* symbolic_info = jvms->method()->get_method_at_bci(jvms->bci());
  return symbolic_info->is_method_handle_intrinsic() && !callee->is_method_handle_intrinsic();
}

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
 private:
  bool  _is_osr;
  float _expected_uses;

 public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
  }

  virtual bool      is_parse() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
  int is_osr() { return _is_osr; }
};

JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  // Grab signature for matching/allocation
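  // If the parser's and the compiler's views of the signature disagree,
  // some concurrent class loading must have changed the system dictionary;
  // the assert below checks that its modification counter moved, which is
  // what later causes the compile to be invalidated rather than installed.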
#ifdef ASSERT
  if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
    MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
    assert(C->env()->system_dictionary_modification_counter_changed(),
           "Must invalidate if TypeFuncs differ");
  }
#endif

  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
 private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool                _separate_io_proj;

 public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _call_node(NULL),
      _separate_io_proj(separate_io_proj)
  {
  }
  virtual JVMState* generate(JVMState* jvms);

  CallStaticJavaNode* call_node() const { return _call_node; }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode* call = new CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
  if (is_inlined_mh_linker(jvms, method())) {
    // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
    // additional information about the method being invoked should be attached
    // to the call site to make resolution logic work
    // (see SharedRuntime::resolve_static_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_intrinsic() ||
        method()->is_compiled_lambda_form()) {
      call->set_method_handle_invoke(true);
    }
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}
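
// An illustrative example of the linker case handled above: at an
// invokehandle call site the caller's symbolic reference names an intrinsic
// such as MethodHandle.linkToStatic, while the node built here targets the
// actual method.  set_override_symbolic_info(true) tells the resolution
// code (SharedRuntime::resolve_*_call_C) to trust the method attached to
// the call site rather than re-resolving the bytecode's symbolic reference.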

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
 private:
  int _vtable_index;

 public:
  VirtualCallGenerator(ciMethod* method, int vtable_index)
    : CallGenerator(method), _vtable_index(vtable_index)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);

  kit.C->print_inlining_update(this);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it.  The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
    ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
    int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
    kit.inc_sp(arg_size);  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod* caller = kit.method();
  ciMethodData* caller_md = (caller == NULL) ? NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
      ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
        >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode* call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
  if (is_inlined_mh_linker(jvms, method())) {
    // To be able to issue a direct call (optimized virtual or virtual)
    // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
    // about the method being invoked should be attached to the call site to
    // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
    call->set_override_symbolic_info(true);
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call.  Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  return new ParseGenerator(m, expected_uses);
}
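
// (InlineTree::check_can_parse() returns NULL when the method's bytecodes
// are available and parseable, and a failure-reason string otherwise; the
// parse-based generators refuse to be built when it is non-NULL.)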

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller.  Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index);
}

// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
 private:
  // unique id for log compilation
  jlong _unique_id;

 protected:
  CallGenerator* _inline_cg;
  virtual bool do_late_inline_check(JVMState* jvms) { return true; }

 public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    DirectCallGenerator(method, true), _unique_id(0), _inline_cg(inline_cg) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    // Record that this call site should be revisited once the main
    // parse is finished.
    if (!is_mh_late_inline()) {
      C->add_late_inline(this);
    }

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

  virtual void print_inlining_late(const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_assert_ready();
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
    C->print_inlining_move_to(this);
    C->print_inlining_update_delayed(this);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }
};

void LateInlineCallGenerator::do_late_inline() {
  // Can't inline it if the call node is dead or its inputs are gone.
  CallStaticJavaNode* call = call_node();
  if (call == NULL || call->outcnt() == 0 ||
      call->in(0) == NULL || call->in(0)->is_top()) {
    return;
  }

  const TypeTuple* r = call->tf()->domain();
  for (int i1 = 0; i1 < method()->arg_size(); i1++) {
    if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }

  Compile* C = Compile::current();
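  // From here on, the call is replayed: the call's JVMState is cloned, a
  // fresh map is rebuilt from the call node's inputs, and the arguments are
  // pushed back onto the expression stack so that _inline_cg can parse the
  // callee as if we were still standing at the invoke bytecode.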
  // If the call has been turned into a macro node, remove it from
  // Compile's macro list.
  if (call->is_macro()) {
    C->remove_macro_node(call);
  }

  // Make a clone of the JVMState that is appropriate for driving a parse
  JVMState* old_jvms = call->jvms();
  JVMState* jvms = old_jvms->clone_shallow(C);
  uint size = call->req();
  SafePointNode* map = new SafePointNode(size, jvms);
  for (uint i1 = 0; i1 < size; i1++) {
    map->init_req(i1, call->in(i1));
  }

  // Make sure the state is a MergeMem for parsing.
  if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
    Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
    C->initial_gvn()->set_type_bottom(mem);
    map->set_req(TypeFunc::Memory, mem);
  }

  uint nargs = method()->arg_size();
  // Blow away the old call arguments.
  Node* top = C->top();
  for (uint i1 = 0; i1 < nargs; i1++) {
    map->set_req(TypeFunc::Parms + i1, top);
  }
  jvms->set_map(map);

  // Make enough space in the expression stack to transfer
  // the incoming arguments and return value.
  map->ensure_stack(jvms, jvms->method()->max_stack());
  for (uint i1 = 0; i1 < nargs; i1++) {
    map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
  }

  C->print_inlining_assert_ready();

  C->print_inlining_move_to(this);

  C->log_late_inline(this);

  // This check is done here because for_method_handle_inline() method
  // needs jvms for inlined state.
  if (!do_late_inline_check(jvms)) {
    map->disconnect_inputs(NULL, C);
    return;
  }

  // Set up default node notes to be picked up by the inlining.
  Node_Notes* old_nn = C->node_notes_at(call->_idx);
  if (old_nn != NULL) {
    Node_Notes* entry_nn = old_nn->clone(C);
    entry_nn->set_jvms(jvms);
    C->set_default_node_notes(entry_nn);
  }

  // Now perform the inlining using the synthesized JVMState.
  JVMState* new_jvms = _inline_cg->generate(jvms);
  if (new_jvms == NULL)  return;  // no change
  if (C->failing())      return;

  // Capture any exceptional control flow.
  GraphKit kit(new_jvms);

  // Find the result object.
  Node* result = C->top();
  int result_size = method()->return_type()->size();
  if (result_size != 0 && !kit.stopped()) {
    result = (result_size == 1) ? kit.pop() : kit.pop_pair();
  }

  C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
  C->env()->notice_inlined_method(_inline_cg->method());
  C->set_inlining_progress(true);

  kit.replace_call(call, result, true);
}


CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}

class LateInlineMHCallGenerator : public LateInlineCallGenerator {
  ciMethod* _caller;
  int       _attempt;
  bool      _input_not_const;

  virtual bool do_late_inline_check(JVMState* jvms);
  virtual bool already_attempted() const { return _attempt > 0; }

 public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}

  virtual bool is_mh_late_inline() const { return true; }

  virtual JVMState* generate(JVMState* jvms) {
    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);

    Compile* C = Compile::current();
    if (_input_not_const) {
      // Inlining won't be possible, so there is no need to enqueue right now.
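      // Attaching this generator to the call node keeps the late-inline
      // attempt reachable: a later round of incremental inlining can still
      // retry once the MethodHandle input has become a constant.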
      call_node()->set_generator(this);
    } else {
      C->add_late_inline(this);
    }
    return new_jvms;
  }
};

bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {
  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);

  Compile::current()->print_inlining_update_delayed(this);

  if (!_input_not_const) {
    _attempt++;
  }

  if (cg != NULL && cg->is_inline()) {
    assert(!cg->is_late_inline(), "we're doing late inlining");
    _inline_cg = cg;
    Compile::current()->dec_number_of_mh_late_inlines();
    return true;
  }

  call_node()->set_generator(this);
  return false;
}

CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  Compile::current()->inc_number_of_mh_late_inlines();
  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
  return cg;
}

class LateInlineStringCallGenerator : public LateInlineCallGenerator {
 public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    C->add_string_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_string_late_inline() const { return true; }
};

CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineStringCallGenerator(method, inline_cg);
}

class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {
 public:
  LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    C->add_boxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }
};

CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineBoxingCallGenerator(method, inline_cg);
}
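
// Note: the warm-call machinery below appears to be largely dormant; call
// sites are classified as hot or cold up front by the current heuristics,
// and WarmCallInfo::make_hot() is still Unimplemented().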

//---------------------------WarmCallGenerator--------------------------------
// Internal class which handles initial deferral of inlining decisions.
class WarmCallGenerator : public CallGenerator {
  WarmCallInfo*  _call_info;
  CallGenerator* _if_cold;
  CallGenerator* _if_hot;
  bool           _is_virtual;  // caches virtuality of if_cold
  bool           _is_inline;   // caches inline-ness of if_hot

 public:
  WarmCallGenerator(WarmCallInfo* ci,
                    CallGenerator* if_cold,
                    CallGenerator* if_hot)
    : CallGenerator(if_cold->method())
  {
    assert(method() == if_hot->method(), "consistent choices");
    _call_info  = ci;
    _if_cold    = if_cold;
    _if_hot     = if_hot;
    _is_virtual = if_cold->is_virtual();
    _is_inline  = if_hot->is_inline();
  }

  virtual bool is_inline() const   { return _is_inline; }
  virtual bool is_virtual() const  { return _is_virtual; }
  virtual bool is_deferred() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
                                            CallGenerator* if_cold,
                                            CallGenerator* if_hot) {
  return new WarmCallGenerator(ci, if_cold, if_hot);
}

JVMState* WarmCallGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (C->log() != NULL) {
    C->log()->elem("warm_call bci='%d'", jvms->bci());
  }
  jvms = _if_cold->generate(jvms);
  if (jvms != NULL) {
    Node* m = jvms->map()->control();
    if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
    if (m->is_Catch())     m = m->in(0);  else m = C->top();
    if (m->is_Proj())      m = m->in(0);  else m = C->top();
    if (m->is_CallJava()) {
      _call_info->set_call(m->as_Call());
      _call_info->set_hot_cg(_if_hot);
#ifndef PRODUCT
      if (PrintOpto || PrintOptoInlining) {
        tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
        tty->print("WCI: ");
        _call_info->print();
      }
#endif
      _call_info->set_heat(_call_info->compute_heat());
      C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
    }
  }
  return jvms;
}

void WarmCallInfo::make_hot() {
  Unimplemented();
}

void WarmCallInfo::make_cold() {
  // No action:  Just dequeue.
}


//------------------------PredictedCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;

 public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove the extreme values from the range.
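    // (PROB_MIN and PROB_MAX are the canonical nearly-0 and nearly-1 branch
    // probabilities; clamping keeps the untaken arm of the type check from
    // looking completely dead to later optimizations.)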
    if (hit_prob > PROB_MAX)  hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN)  hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
  }

  virtual bool is_virtual() const  { return true; }
  virtual bool is_inline() const   { return _if_hit->is_inline(); }
  virtual bool is_deferred() const { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit, hit_prob);
}


JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);
  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_call bci='%d' klass='%d'",
              jvms->bci(), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver_before_call(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  // Make a copy of the replaced nodes in case we need to restore them.
  ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
  replaced_nodes.clone();

  Node* exact_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = kit.type_check_receiver(receiver,
                                           _predicted_receiver, _hit_prob,
                                           &exact_receiver);

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms = NULL;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      if (kit.failing())
        return NULL;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != NULL, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // The instance can never exactly match the predicted type:
    // only the slow path remains.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Fall through if the instance exactly matches the desired type.
  kit.replace_in_map(receiver, exact_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // There are 2 branches and the replaced nodes are only valid on
  // one: restore the replaced nodes to what they were before the
  // branch.
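  // (E.g., a receiver sharpened to the predicted klass on the hit path must
  // not be assumed on the merged path, where the miss branch joins back in.)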
  kit.map()->set_replaced_nodes(replaced_nodes);

  // Finish the diamond.
  kit.C->set_has_split_ifs(true);  // Has chance for split-if optimization
  RegionNode* region = new RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  // Merge memory.
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool delayed_forbidden) {
  assert(callee->is_method_handle_intrinsic() ||
         callee->is_compiled_lambda_form(), "for_method_handle_call mismatch");
  bool input_not_const;
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, input_not_const);
  Compile* C = Compile::current();
  if (cg != NULL) {
    if (!delayed_forbidden && AlwaysIncrementalInline) {
      return CallGenerator::for_late_inline(callee, cg);
    } else {
      return cg;
    }
  }
  int bci = jvms->bci();
  ciCallProfile profile = caller->call_profile_at_bci(bci);
  int call_site_count = caller->scale_count(profile.count());

  if (IncrementalInline && call_site_count > 0 &&
      (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
  } else {
    // Out-of-line call.
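    // (The callee here is still the MethodHandle intrinsic itself, so the
    // direct call is dispatched through the linker adapter at run time.)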
    return CallGenerator::for_direct_call(callee);
  }
}

CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
        guarantee(!target->is_method_handle_intrinsic(), "should not happen");  // XXX remove
        const int vtable_index = Method::invalid_vtable_index;
        CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS, NULL, true, true);
        assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
        return cg;
      } else {
        const char* msg = "receiver not constant";
        if (PrintInlining)  C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
        C->log_inline_failure(msg);
      }
    }
    break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      // Get MemberName argument:
      Node* member_name = kit.argument(callee->arg_size() - 1);
      if (member_name->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        // In lambda forms we erase signature types to avoid resolving issues
        // involving class loaders.  When we optimize a method handle invoke
        // to a direct call we must cast the receiver and arguments to their
        // actual types.
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          Node* arg = kit.argument(0);
          const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
          const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
          if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
            Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, sig_type));
            kit.set_argument(0, cast_obj);
          }
        }
        // Cast reference arguments to their types.
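        // Illustrative example: a lambda form may see a target such as
        // String::concat under the erased signature (Object)Object; the
        // loop below inserts a CheckCastPP to String for the reference
        // argument, so the direct call is type-correct.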
        for (int i = 0, j = 0; i < signature->count(); i++) {
          ciType* t = signature->type_at(i);
          if (t->is_klass()) {
            Node* arg = kit.argument(receiver_skip + j);
            const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
            const Type*       sig_type = TypeOopPtr::make_from_klass(t->as_klass());
            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
              Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, sig_type));
              kit.set_argument(receiver_skip + j, cast_obj);
            }
          }
          j += t->size();  // long and double take two slots
        }

        // Try to get the most accurate receiver type.
        const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
        const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
        int  vtable_index       = Method::invalid_vtable_index;
        bool call_does_dispatch = false;

        ciKlass* speculative_receiver_type = NULL;
        if (is_virtual_or_interface) {
          ciInstanceKlass* klass = target->holder();
          Node*             receiver_node = kit.argument(0);
          const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
          // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
          // optimize_virtual_call() takes 2 different holder
          // arguments for a corner case that doesn't apply here (see
          // Parse::do_call()).
          target = C->optimize_virtual_call(caller, jvms->bci(), klass, klass,
                                            target, receiver_type, is_virtual,
                                            call_does_dispatch, vtable_index,  // out-parameters
                                            /*check_access=*/false);
          // We lack profiling at this call but type speculation may
          // provide us with a type.
          speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
        }
        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, /*allow_inline=*/true, PROB_ALWAYS, speculative_receiver_type, true, true);
        assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
        return cg;
      } else {
        const char* msg = "member_name not constant";
        if (PrintInlining)  C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
        C->log_inline_failure(msg);
      }
    }
    break;

  default:
    fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
    break;
  }
  return NULL;
}


//------------------------PredicatedIntrinsicGenerator------------------------
// Internal class which handles all predicated Intrinsic calls.
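// Predicated intrinsics are used, for instance, for intrinsics whose fast
// path is only valid for certain receiver types (e.g. the AES crypto
// stubs): each predicate tests its condition at run time, and only if it
// holds is the matching intrinsic version executed; otherwise control
// falls through to the regularly compiled Java code.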
class PredicatedIntrinsicGenerator : public CallGenerator {
  CallGenerator* _intrinsic;
  CallGenerator* _cg;

 public:
  PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
                               CallGenerator* cg)
    : CallGenerator(cg->method())
  {
    _intrinsic = intrinsic;
    _cg        = cg;
  }

  virtual bool is_virtual() const   { return true; }
  virtual bool is_inlined() const   { return true; }
  virtual bool is_intrinsic() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
                                                       CallGenerator* cg) {
  return new PredicatedIntrinsicGenerator(intrinsic, cg);
}


JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
  // The code we want to generate here is:
  //    if (receiver == NULL)
  //      uncommon_trap;
  //    if (predicate(0))
  //      do_intrinsic(0);
  //    else
  //      if (predicate(1))
  //        do_intrinsic(1);
  //      ...
  //      else
  //        do_java_comp;

  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicated_intrinsic bci='%d' method='%d'",
              jvms->bci(), log->identify(method()));
  }

  if (!method()->is_static()) {
    // We need an explicit receiver null_check before checking its type in predicate.
    // We share a map with the caller, so his JVMS gets adjusted.
    Node* receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      return kit.transfer_exceptions_into_jvms();
    }
  }

  int n_predicates = _intrinsic->predicates_count();
  assert(n_predicates > 0, "sanity");

  JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));

  // Region for normal compilation code if intrinsic failed.
  Node* slow_region = new RegionNode(1);

  int results = 0;
  for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
#ifdef ASSERT
    JVMState* old_jvms = kit.jvms();
    SafePointNode* old_map = kit.map();
    Node* old_io  = old_map->i_o();
    Node* old_mem = old_map->memory();
    Node* old_exc = old_map->next_exception();
#endif
    Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
#ifdef ASSERT
    // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
    assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
    SafePointNode* new_map = kit.map();
    assert(old_io  == new_map->i_o(), "generate_predicate should not change i_o");
    assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
    assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
#endif
    if (!kit.stopped()) {
      PreserveJVMState pjvms(&kit);
      // Generate intrinsic code:
      JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
      if (new_jvms == NULL) {
        // Intrinsic failed, use normal compilation path for this predicate.
        slow_region->add_req(kit.control());
      } else {
        kit.add_exception_states_from(new_jvms);
        kit.set_jvms(new_jvms);
        if (!kit.stopped()) {
          result_jvms[results++] = kit.jvms();
        }
      }
    }
    if (else_ctrl == NULL) {
      else_ctrl = kit.C->top();
    }
    kit.set_control(else_ctrl);
  }
  if (!kit.stopped()) {
    // Final 'else' after predicates.
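    // (Reached when every predicate evaluated to false: the remaining
    // control flow joins the normal compilation path.)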
    slow_region->add_req(kit.control());
  }

  if (slow_region->req() > 1) {
    PreserveJVMState pjvms(&kit);
    // Generate normal compilation code:
    kit.set_control(gvn.transform(slow_region));
    JVMState* new_jvms = _cg->generate(kit.sync_jvms());
    if (kit.failing())
      return NULL;  // might happen because of NodeCountInliningCutoff
    assert(new_jvms != NULL, "must be");
    kit.add_exception_states_from(new_jvms);
    kit.set_jvms(new_jvms);
    if (!kit.stopped()) {
      result_jvms[results++] = kit.jvms();
    }
  }

  if (results == 0) {
    // All paths ended in uncommon traps.
    (void) kit.stop();
    return kit.transfer_exceptions_into_jvms();
  }

  if (results == 1) {  // Only one path
    kit.set_jvms(result_jvms[0]);
    return kit.transfer_exceptions_into_jvms();
  }

  // Merge all paths.
  kit.C->set_has_split_ifs(true);  // Has chance for split-if optimization
  RegionNode* region = new RegionNode(results + 1);
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  for (int i = 0; i < results; i++) {
    JVMState* jvms = result_jvms[i];
    int path = i + 1;
    SafePointNode* map = jvms->map();
    region->init_req(path, map->control());
    iophi->set_req(path, map->i_o());
    if (i == 0) {
      kit.set_jvms(jvms);
    } else {
      kit.merge_memory(map->merged_memory(), region, path);
    }
  }
  kit.set_control(gvn.transform(region));
  kit.set_i_o(gvn.transform(iophi));
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }

  // Merge debug info.
  Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  Node* map = kit.map();
  uint limit = map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* n = map->in(i);
    ins[0] = n;
    const Type* t = gvn.type(n);
    bool needs_phi = false;
    for (int j = 1; j < results; j++) {
      JVMState* jvms = result_jvms[j];
      Node* jmap = jvms->map();
      Node* m = NULL;
      if (jmap->req() > i) {
        m = jmap->in(i);
        if (m != n) {
          needs_phi = true;
          t = t->meet_speculative(gvn.type(m));
        }
      }
      ins[j] = m;
    }
    if (needs_phi) {
      Node* phi = PhiNode::make(region, n, t);
      for (int j = 1; j < results; j++) {
        phi->set_req(j + 1, ins[j]);
      }
      map->set_req(i, gvn.transform(phi));
    }
  }

  return kit.transfer_exceptions_into_jvms();
}
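
// The debug-info merge above plays the same role as the two-way merge in
// PredictedCallGenerator::generate(), generalized to an arbitrary number
// of surviving result paths: a Phi is built only for map slots that differ.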

//-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which handles calls that are replaced by an uncommon trap.
class UncommonTrapCallGenerator : public CallGenerator {
  Deoptimization::DeoptReason _reason;
  Deoptimization::DeoptAction _action;

 public:
  UncommonTrapCallGenerator(ciMethod* m,
                            Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action)
    : CallGenerator(m)
  {
    _reason = reason;
    _action = action;
  }

  virtual bool is_virtual() const { ShouldNotReachHere(); return false; }
  virtual bool is_trap() const    { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator*
CallGenerator::for_uncommon_trap(ciMethod* m,
                                 Deoptimization::DeoptReason reason,
                                 Deoptimization::DeoptAction action) {
  return new UncommonTrapCallGenerator(m, reason, action);
}


JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver.)
  int nargs = method()->arg_size();
  kit.inc_sp(nargs);
  assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
  if (_reason == Deoptimization::Reason_class_check &&
      _action == Deoptimization::Action_maybe_recompile) {
    // Temp fix for 6529811
    // Don't allow uncommon_trap to override our decision to recompile in the event
    // of a class cast failure for a monomorphic call, as it will never let us convert
    // the call to either bi-morphic or megamorphic and can lead to unc-trap loops.
    bool keep_exact_action = true;
    kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
  } else {
    kit.uncommon_trap(_reason, _action);
  }
  return kit.transfer_exceptions_into_jvms();
}

// (Note: Moved hook_up_call to GraphKit::set_edges_for_java_call.)

// (Note: Merged hook_up_exits into ParseGenerator::generate.)

#define NODES_OVERHEAD_PER_METHOD (30.0)
#define NODES_PER_BYTECODE (9.5)

void WarmCallInfo::init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor) {
  int call_count = profile.count();
  int code_size = call_method->code_size();

  // Expected execution count is based on the historical count:
  _count = call_count < 0 ? 1 : call_site->method()->scale_count(call_count, prof_factor);

  // Expected profit from inlining, in units of simple call-overheads.
  _profit = 1.0;

  // Expected work performed by the call in units of call-overheads.
  // %%% need an empirical curve fit for "work" (time in call)
  float bytecodes_per_call = 3;
  _work = 1.0 + code_size / bytecodes_per_call;

  // Expected size of compilation graph:
  // -XX:+PrintParseStatistics once reported:
  //   Methods seen: 9184  Methods parsed: 9184  Nodes created: 1582391
  //   Histogram of 144298 parsed bytecodes:
  // %%% Need a better predictor for graph size.
  _size = NODES_OVERHEAD_PER_METHOD + (NODES_PER_BYTECODE * code_size);
}
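
// Worked example (illustrative numbers): a call site whose profile shows
// count = 200 for a 40-byte callee gets _work = 1.0 + 40/3, about 14.3
// call-overheads, and _size = 30.0 + 9.5 * 40 = 410.0 nodes; whether that
// classifies as cold, warm, or hot depends on the WarmCall* thresholds
// consulted below.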

// is_cold:  Return true if the node should never be inlined.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_cold() const {
  if (count()  <  WarmCallMinCount)   return true;
  if (profit() <  WarmCallMinProfit)  return true;
  if (work()   >  WarmCallMaxWork)    return true;
  if (size()   >  WarmCallMaxSize)    return true;
  return false;
}

// is_hot:  Return true if the node should be inlined immediately.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_hot() const {
  assert(!is_cold(), "eliminate is_cold cases before testing is_hot");
  if (count()  >= HotCallCountThreshold)   return true;
  if (profit() >= HotCallProfitThreshold)  return true;
  if (work()   <= HotCallTrivialWork)      return true;
  if (size()   <= HotCallTrivialSize)      return true;
  return false;
}

// compute_heat:
float WarmCallInfo::compute_heat() const {
  assert(!is_cold(), "compute heat only on warm nodes");
  assert(!is_hot(),  "compute heat only on warm nodes");
  int min_size = MAX2(0,   (int)HotCallTrivialSize);
  int max_size = MIN2(500, (int)WarmCallMaxSize);
  float method_size = (size() - min_size) / MAX2(1, max_size - min_size);
  float size_factor;
  if      (method_size < 0.05)  size_factor = 4;    // 2 sigmas better than avg.
  else if (method_size < 0.15)  size_factor = 2;    // 1 sigma better than avg.
  else if (method_size < 0.5)   size_factor = 1;    // better than avg.
  else                          size_factor = 0.5;  // worse than avg.
  return (count() * profit() * size_factor);
}

bool WarmCallInfo::warmer_than(WarmCallInfo* that) {
  assert(this != that, "compare only different WCIs");
  assert(this->heat() != 0 && that->heat() != 0, "call compute_heat 1st");
  if (this->heat() > that->heat())  return true;
  if (this->heat() < that->heat())  return false;
  assert(this->heat() == that->heat(), "no NaN heat allowed");
  // Equal heat.  Break the tie some other way.
  if (!this->call() || !that->call())  return (address)this > (address)that;
  return this->call()->_idx > that->call()->_idx;
}

//#define UNINIT_NEXT ((WarmCallInfo*)badAddress)
#define UNINIT_NEXT ((WarmCallInfo*)NULL)

WarmCallInfo* WarmCallInfo::insert_into(WarmCallInfo* head) {
  assert(next() == UNINIT_NEXT, "not yet on any list");
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != NULL && next_p->warmer_than(this)) {
    prev_p = next_p;
    next_p = prev_p->next();
  }
  // Install this between prev_p and next_p.
  this->set_next(next_p);
  if (prev_p == NULL)
    head = this;
  else
    prev_p->set_next(this);
  return head;
}

WarmCallInfo* WarmCallInfo::remove_from(WarmCallInfo* head) {
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != this) {
    assert(next_p != NULL, "this must be in the list somewhere");
    prev_p = next_p;
    next_p = prev_p->next();
  }
  next_p = this->next();
  debug_only(this->set_next(UNINIT_NEXT));
  // Remove this from between prev_p and next_p.
  if (prev_p == NULL)
    head = next_p;
  else
    prev_p->set_next(next_p);
  return head;
}

WarmCallInfo WarmCallInfo::_always_hot(WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE(),
                                       WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE());
WarmCallInfo WarmCallInfo::_always_cold(WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE(),
                                        WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE());

WarmCallInfo* WarmCallInfo::always_hot() {
  assert(_always_hot.is_hot(), "must always be hot");
  return &_always_hot;
}

WarmCallInfo* WarmCallInfo::always_cold() {
  assert(_always_cold.is_cold(), "must always be cold");
  return &_always_cold;
}


#ifndef PRODUCT

void WarmCallInfo::print() const {
  tty->print("%s : C=%6.1f P=%6.1f W=%6.1f S=%6.1f H=%6.1f -> %p",
             is_cold() ? "cold" : is_hot() ? "hot " : "warm",
             count(), profit(), work(), size(), compute_heat(), next());
  tty->cr();
  if (call() != NULL)  call()->dump();
}

void print_wci(WarmCallInfo* ci) {
  ci->print();
}

void WarmCallInfo::print_all() const {
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    p->print();
}

int WarmCallInfo::count_all() const {
  int cnt = 0;
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    cnt++;
  return cnt;
}

#endif //PRODUCT