/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/sharedRuntime.hpp"


// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
private:
  bool  _is_osr;
  float _expected_uses;

public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
  }

  virtual bool      is_parse() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
  int is_osr() { return _is_osr; }
};

JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  // Grab signature for matching/allocation
#ifdef ASSERT
  if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
    MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
    assert(C->env()->system_dictionary_modification_counter_changed(),
           "Must invalidate if TypeFuncs differ");
  }
#endif

  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
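  // (transfer_exceptions_into_jvms() hands back the exit JVMS with any
  // pending exception states attached to its map, so the caller can
  // handle or re-throw them at the call site.)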
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
 private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool                _separate_io_proj;

 public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _separate_io_proj(separate_io_proj)
  {
  }
  virtual JVMState* generate(JVMState* jvms);

  CallStaticJavaNode* call_node() const { return _call_node; }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode *call = new CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
  _call_node = call;  // Save the call node in case we need it later
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_intrinsic() ||
        method()->is_compiled_lambda_form()) {
      call->set_method_handle_invoke(true);
    }
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
private:
  int _vtable_index;
public:
  VirtualCallGenerator(ciMethod* method, int vtable_index)
    : CallGenerator(method), _vtable_index(vtable_index)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);

  kit.C->print_inlining_update(this);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it.  The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
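  // Note: higher_equal(TypePtr::NULL_PTR) is the "provably null" test;
  // it holds only when the receiver's type is at least as narrow as the
  // null-pointer type, i.e. the receiver is known to be null.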
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    kit.inc_sp(method()->arg_size());  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod *caller = kit.method();
  ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
      ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
        >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode *call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call.  Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller.  Thus, this CallGenerator cannot be mixed with others!
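// A usage sketch (illustrative only; see Compile's OSR entry code for the
// real call site):
//   CallGenerator* cg = CallGenerator::for_osr(method, osr_bci);
//   if (cg != NULL)  jvms = cg->generate(osr_entry_jvms);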
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index);
}

// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
 private:
  // unique id for log compilation
  jlong _unique_id;

 protected:
  CallGenerator* _inline_cg;
  virtual bool do_late_inline_check(JVMState* jvms) { return true; }

 public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    DirectCallGenerator(method, true), _inline_cg(inline_cg), _unique_id(0) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    // Record that this call site should be revisited once the main
    // parse is finished.
    if (!is_mh_late_inline()) {
      C->add_late_inline(this);
    }

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

  virtual void print_inlining_late(const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_assert_ready();
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
    C->print_inlining_move_to(this);
    C->print_inlining_update_delayed(this);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }
};

void LateInlineCallGenerator::do_late_inline() {
  // Can't inline it if the call site has gone dead or is malformed.
  CallStaticJavaNode* call = call_node();
  if (call == NULL || call->outcnt() == 0 ||
      call->in(0) == NULL || call->in(0)->is_top()) {
    return;
  }

  const TypeTuple *r = call->tf()->domain();
  for (int i1 = 0; i1 < method()->arg_size(); i1++) {
    if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }

  Compile* C = Compile::current();
  // Remove inlined methods from Compiler's lists.
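  // (If the call is a macro node -- e.g. a boxing call tracked for boxing
  // elimination -- drop that bookkeeping before inlining replaces the call.)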
  if (call->is_macro()) {
    C->remove_macro_node(call);
  }

  // Make a clone of the JVMState that is appropriate to use for driving a parse
  JVMState* old_jvms = call->jvms();
  JVMState* jvms = old_jvms->clone_shallow(C);
  uint size = call->req();
  SafePointNode* map = new SafePointNode(size, jvms);
  for (uint i1 = 0; i1 < size; i1++) {
    map->init_req(i1, call->in(i1));
  }

  // Make sure the state is a MergeMem for parsing.
  if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
    Node* mem = MergeMemNode::make(C, map->in(TypeFunc::Memory));
    C->initial_gvn()->set_type_bottom(mem);
    map->set_req(TypeFunc::Memory, mem);
  }

  uint nargs = method()->arg_size();
  // blow away old call arguments
  Node* top = C->top();
  for (uint i1 = 0; i1 < nargs; i1++) {
    map->set_req(TypeFunc::Parms + i1, top);
  }
  jvms->set_map(map);

  // Make enough space in the expression stack to transfer
  // the incoming arguments and return value.
  map->ensure_stack(jvms, jvms->method()->max_stack());
  for (uint i1 = 0; i1 < nargs; i1++) {
    map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
  }

  C->print_inlining_assert_ready();

  C->print_inlining_move_to(this);

  C->log_late_inline(this);

  // This check is done here because for_method_handle_inline() needs
  // the JVMS of the inlined state.
  if (!do_late_inline_check(jvms)) {
    map->disconnect_inputs(NULL, C);
    return;
  }

  // Set up default node notes to be picked up by the inlining
  Node_Notes* old_nn = C->node_notes_at(call->_idx);
  if (old_nn != NULL) {
    Node_Notes* entry_nn = old_nn->clone(C);
    entry_nn->set_jvms(jvms);
    C->set_default_node_notes(entry_nn);
  }

  // Now perform the inlining using the synthesized JVMState
  JVMState* new_jvms = _inline_cg->generate(jvms);
  if (new_jvms == NULL)  return;  // no change
  if (C->failing())      return;

  // Capture any exceptional control flow
  GraphKit kit(new_jvms);

  // Find the result object
  Node* result = C->top();
  int   result_size = method()->return_type()->size();
  if (result_size != 0 && !kit.stopped()) {
    result = (result_size == 1) ? kit.pop() : kit.pop_pair();
  }

  C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
  C->env()->notice_inlined_method(_inline_cg->method());
  C->set_inlining_progress(true);

  kit.replace_call(call, result, true);
}


CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}

class LateInlineMHCallGenerator : public LateInlineCallGenerator {
  ciMethod* _caller;
  int _attempt;
  bool _input_not_const;

  virtual bool do_late_inline_check(JVMState* jvms);
  virtual bool already_attempted() const { return _attempt > 0; }

 public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}

  virtual bool is_mh_late_inline() const { return true; }

  virtual JVMState* generate(JVMState* jvms) {
    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);

    Compile* C = Compile::current();
    if (_input_not_const) {
      // Inlining won't be possible, so no need to enqueue right now.
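      // Stash this generator on the call node itself so a later pass can
      // retry the inline once the MethodHandle input has become constant.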
      call_node()->set_generator(this);
    } else {
      C->add_late_inline(this);
    }
    return new_jvms;
  }
};

bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {

  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);

  Compile::current()->print_inlining_update_delayed(this);

  if (!_input_not_const) {
    _attempt++;
  }

  if (cg != NULL) {
    assert(!cg->is_late_inline() && cg->is_inline(), "we're doing late inlining");
    _inline_cg = cg;
    Compile::current()->dec_number_of_mh_late_inlines();
    return true;
  }

  call_node()->set_generator(this);
  return false;
}

CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  Compile::current()->inc_number_of_mh_late_inlines();
  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
  return cg;
}

class LateInlineStringCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_string_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_string_late_inline() const { return true; }
};

CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineStringCallGenerator(method, inline_cg);
}

class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_boxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }
};

CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineBoxingCallGenerator(method, inline_cg);
}

//---------------------------WarmCallGenerator--------------------------------
// Internal class which handles initial deferral of inlining decisions.
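// A usage sketch (illustrative only): a client holding both an inline
// candidate and an out-of-line fallback would write
//   CallGenerator* cg = CallGenerator::for_warm_call(ci, cold_cg, hot_cg);
// generate() below emits the cold version right away and queues the
// WarmCallInfo; note that WarmCallInfo::make_hot() is still
// Unimplemented(), so the hot replacement never actually happens.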
class WarmCallGenerator : public CallGenerator {
  WarmCallInfo*   _call_info;
  CallGenerator*  _if_cold;
  CallGenerator*  _if_hot;
  bool            _is_virtual;   // caches virtuality of if_cold
  bool            _is_inline;    // caches inline-ness of if_hot

 public:
  WarmCallGenerator(WarmCallInfo* ci,
                    CallGenerator* if_cold,
                    CallGenerator* if_hot)
    : CallGenerator(if_cold->method())
  {
    assert(method() == if_hot->method(), "consistent choices");
    _call_info  = ci;
    _if_cold    = if_cold;
    _if_hot     = if_hot;
    _is_virtual = if_cold->is_virtual();
    _is_inline  = if_hot->is_inline();
  }

  virtual bool      is_inline() const   { return _is_inline; }
  virtual bool      is_virtual() const  { return _is_virtual; }
  virtual bool      is_deferred() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
                                            CallGenerator* if_cold,
                                            CallGenerator* if_hot) {
  return new WarmCallGenerator(ci, if_cold, if_hot);
}

JVMState* WarmCallGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (C->log() != NULL) {
    C->log()->elem("warm_call bci='%d'", jvms->bci());
  }
  jvms = _if_cold->generate(jvms);
  if (jvms != NULL) {
    Node* m = jvms->map()->control();
    if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
    if (m->is_Catch())     m = m->in(0);  else m = C->top();
    if (m->is_Proj())      m = m->in(0);  else m = C->top();
    if (m->is_CallJava()) {
      _call_info->set_call(m->as_Call());
      _call_info->set_hot_cg(_if_hot);
#ifndef PRODUCT
      if (PrintOpto || PrintOptoInlining) {
        tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
        tty->print("WCI: ");
        _call_info->print();
      }
#endif
      _call_info->set_heat(_call_info->compute_heat());
      C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
    }
  }
  return jvms;
}

void WarmCallInfo::make_hot() {
  Unimplemented();
}

void WarmCallInfo::make_cold() {
  // No action:  Just dequeue.
}


//------------------------PredictedCallGenerator------------------------------
// Internal class which handles calls guarded by a predicted receiver type.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;

 public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict a hit_prob as extreme as 0 or 1.
    // Remove the extreme values from the range.
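    // Clamping keeps the branch frequency of the type-check diamond built
    // in generate() a well-formed probability strictly inside (0, 1).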
    if (hit_prob > PROB_MAX)  hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN)  hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
  }

  virtual bool      is_virtual() const  { return true; }
  virtual bool      is_inline() const   { return _if_hit->is_inline(); }
  virtual bool      is_deferred() const { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit, hit_prob);
}


JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);
  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_call bci='%d' klass='%d'",
              jvms->bci(), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver_before_call(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  // Make a copy of the replaced nodes in case we need to restore them
  ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
  replaced_nodes.clone();

  Node* exact_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = kit.type_check_receiver(receiver,
                                           _predicted_receiver, _hit_prob,
                                           &exact_receiver);

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      if (kit.failing())
        return NULL;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != NULL, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // The instance does not exactly match the desired type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // fall through if the instance exactly matches the desired type
  kit.replace_in_map(receiver, exact_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // There are 2 branches and the replaced nodes are only valid on
  // one: restore the replaced nodes to what they were before the
  // branch.
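  // (replace_in_map() above recorded receiver -> exact_receiver on the fast
  // path only; the merged state must revert to the bookkeeping captured in
  // 'replaced_nodes' before the type check split the control flow.)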
  kit.map()->set_replaced_nodes(replaced_nodes);

  // Finish the diamond.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  // Merge memory
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool delayed_forbidden) {
  assert(callee->is_method_handle_intrinsic() ||
         callee->is_compiled_lambda_form(), "for_method_handle_call mismatch");
  bool input_not_const;
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, input_not_const);
  Compile* C = Compile::current();
  if (cg != NULL) {
    if (!delayed_forbidden && AlwaysIncrementalInline) {
      return CallGenerator::for_late_inline(callee, cg);
    } else {
      return cg;
    }
  }
  int bci = jvms->bci();
  ciCallProfile profile = caller->call_profile_at_bci(bci);
  int call_site_count = caller->scale_count(profile.count());

  if (IncrementalInline && call_site_count > 0 &&
      (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
  } else {
    // Out-of-line call.
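    // The target could not be resolved to a constant here and late retry
    // is off the table, so fall back to a plain out-of-line call to the
    // method handle intrinsic itself.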
    return CallGenerator::for_direct_call(callee);
  }
}

CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
        guarantee(!target->is_method_handle_intrinsic(), "should not happen");  // XXX remove
        const int vtable_index = Method::invalid_vtable_index;
        CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS, NULL, true, true);
        assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
        if (cg != NULL && cg->is_inline())
          return cg;
      } else {
        const char* msg = "receiver not constant";
        if (PrintInlining)  C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
        C->log_inline_failure(msg);
      }
    }
    break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      // Get MemberName argument:
      Node* member_name = kit.argument(callee->arg_size() - 1);
      if (member_name->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        // In lambda forms we erase signature types to avoid resolving issues
        // involving class loaders.  When we optimize a method handle invoke
        // to a direct call we must cast the receiver and arguments to their
        // actual types.
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          Node* arg = kit.argument(0);
          const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
          const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
          if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
            Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, sig_type));
            kit.set_argument(0, cast_obj);
          }
        }
        // Cast reference arguments to their types.
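        // For example (signature is illustrative), a lambda form may present
        // an erased (Object,Object)Object view of a target that is really
        // (String,String)String; the loop below re-installs the precise
        // reference types with CheckCastPP nodes.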
        for (int i = 0; i < signature->count(); i++) {
          ciType* t = signature->type_at(i);
          if (t->is_klass()) {
            Node* arg = kit.argument(receiver_skip + i);
            const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
            const Type*       sig_type = TypeOopPtr::make_from_klass(t->as_klass());
            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
              Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, sig_type));
              kit.set_argument(receiver_skip + i, cast_obj);
            }
          }
        }

        // Try to get the most accurate receiver type
        const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
        const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
        int  vtable_index       = Method::invalid_vtable_index;
        bool call_does_dispatch = false;

        ciKlass* speculative_receiver_type = NULL;
        if (is_virtual_or_interface) {
          ciInstanceKlass* klass = target->holder();
          Node*             receiver_node = kit.argument(0);
          const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
          // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
          // optimize_virtual_call() takes 2 different holder
          // arguments for a corner case that doesn't apply here (see
          // Parse::do_call())
          target = C->optimize_virtual_call(caller, jvms->bci(), klass, klass,
                                            target, receiver_type, is_virtual,
                                            call_does_dispatch, vtable_index);  // out-parameters
          // We lack profiling at this call but type speculation may
          // provide us with a type.  (Guard against a NULL receiver_type,
          // which isa_oopptr() can return.)
          speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
        }
        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, true, PROB_ALWAYS, speculative_receiver_type, true, true);
        assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
        if (cg != NULL && cg->is_inline())
          return cg;
      } else {
        const char* msg = "member_name not constant";
        if (PrintInlining)  C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
        C->log_inline_failure(msg);
      }
    }
    break;

  default:
    fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
    break;
  }
  return NULL;
}


//------------------------PredicatedIntrinsicGenerator------------------------------
// Internal class which handles all predicated Intrinsic calls.
class PredicatedIntrinsicGenerator : public CallGenerator {
  CallGenerator* _intrinsic;
  CallGenerator* _cg;

 public:
  PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
                               CallGenerator* cg)
    : CallGenerator(cg->method())
  {
    _intrinsic = intrinsic;
    _cg        = cg;
  }

  virtual bool      is_virtual() const   { return true; }
  virtual bool      is_inlined() const   { return true; }
  virtual bool      is_intrinsic() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
                                                       CallGenerator* cg) {
  return new PredicatedIntrinsicGenerator(intrinsic, cg);
}


JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
  // The code we want to generate here is:
  //    if (receiver == NULL)
  //      uncommon_trap
  //    if (predicate(0))
  //      do_intrinsic(0)
  //    else
  //      if (predicate(1))
  //        do_intrinsic(1)
  //    ...
  //    else
  //      do_java_comp

  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicated_intrinsic bci='%d' method='%d'",
              jvms->bci(), log->identify(method()));
  }

  if (!method()->is_static()) {
    // We need an explicit receiver null_check before checking its type in predicate.
    // We share a map with the caller, so his JVMS gets adjusted.
    Node* receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      return kit.transfer_exceptions_into_jvms();
    }
  }

  int n_predicates = _intrinsic->predicates_count();
  assert(n_predicates > 0, "sanity");

  JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));

  // Region for normal compilation code if intrinsic failed.
  Node* slow_region = new RegionNode(1);

  int results = 0;
  for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
#ifdef ASSERT
    JVMState* old_jvms = kit.jvms();
    SafePointNode* old_map = kit.map();
    Node* old_io  = old_map->i_o();
    Node* old_mem = old_map->memory();
    Node* old_exc = old_map->next_exception();
#endif
    Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
#ifdef ASSERT
    // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
    assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
    SafePointNode* new_map = kit.map();
    assert(old_io  == new_map->i_o(),    "generate_predicate should not change i_o");
    assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
    assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
#endif
    if (!kit.stopped()) {
      PreserveJVMState pjvms(&kit);
      // Generate intrinsic code:
      JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
      if (new_jvms == NULL) {
        // Intrinsic failed, use normal compilation path for this predicate.
        slow_region->add_req(kit.control());
      } else {
        kit.add_exception_states_from(new_jvms);
        kit.set_jvms(new_jvms);
        if (!kit.stopped()) {
          result_jvms[results++] = kit.jvms();
        }
      }
    }
    if (else_ctrl == NULL) {
      else_ctrl = kit.C->top();
    }
    kit.set_control(else_ctrl);
  }
  if (!kit.stopped()) {
    // Final 'else' after predicates.
    slow_region->add_req(kit.control());
  }
  if (slow_region->req() > 1) {
    PreserveJVMState pjvms(&kit);
    // Generate normal compilation code:
    kit.set_control(gvn.transform(slow_region));
    JVMState* new_jvms = _cg->generate(kit.sync_jvms());
    if (kit.failing())
      return NULL;  // might happen because of NodeCountInliningCutoff
    assert(new_jvms != NULL, "must be");
    kit.add_exception_states_from(new_jvms);
    kit.set_jvms(new_jvms);
    if (!kit.stopped()) {
      result_jvms[results++] = kit.jvms();
    }
  }

  if (results == 0) {
    // All paths ended in uncommon traps.
    (void) kit.stop();
    return kit.transfer_exceptions_into_jvms();
  }

  if (results == 1) { // Only one path
    kit.set_jvms(result_jvms[0]);
    return kit.transfer_exceptions_into_jvms();
  }

  // Merge all paths.
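  // Join every surviving path on a fresh Region, with Phis for i_o and
  // memory here, and for any differing debug-info inputs below.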
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(results + 1);
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  for (int i = 0; i < results; i++) {
    JVMState* jvms = result_jvms[i];
    int path = i + 1;
    SafePointNode* map = jvms->map();
    region->init_req(path, map->control());
    iophi->set_req(path, map->i_o());
    if (i == 0) {
      kit.set_jvms(jvms);
    } else {
      kit.merge_memory(map->merged_memory(), region, path);
    }
  }
  kit.set_control(gvn.transform(region));
  kit.set_i_o(gvn.transform(iophi));
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }

  // Merge debug info.
  Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  Node* map = kit.map();
  uint limit = map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* n = map->in(i);
    ins[0] = n;
    const Type* t = gvn.type(n);
    bool needs_phi = false;
    for (int j = 1; j < results; j++) {
      JVMState* jvms = result_jvms[j];
      Node* jmap = jvms->map();
      Node* m = NULL;
      if (jmap->req() > i) {
        m = jmap->in(i);
        if (m != n) {
          needs_phi = true;
          t = t->meet_speculative(gvn.type(m));
        }
      }
      ins[j] = m;
    }
    if (needs_phi) {
      Node* phi = PhiNode::make(region, n, t);
      for (int j = 1; j < results; j++) {
        phi->set_req(j + 1, ins[j]);
      }
      map->set_req(i, gvn.transform(phi));
    }
  }

  return kit.transfer_exceptions_into_jvms();
}

//-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which handles calls that are replaced by an uncommon trap.
class UncommonTrapCallGenerator : public CallGenerator {
  Deoptimization::DeoptReason _reason;
  Deoptimization::DeoptAction _action;

 public:
  UncommonTrapCallGenerator(ciMethod* m,
                            Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action)
    : CallGenerator(m)
  {
    _reason = reason;
    _action = action;
  }

  virtual bool      is_virtual() const { ShouldNotReachHere(); return false; }
  virtual bool      is_trap() const    { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator*
CallGenerator::for_uncommon_trap(ciMethod* m,
                                 Deoptimization::DeoptReason reason,
                                 Deoptimization::DeoptAction action) {
  return new UncommonTrapCallGenerator(m, reason, action);
}


JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
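  // The interpreter will re-execute the call bytecode after the trap, so
  // the arguments must sit on the expression stack in the recorded JVMState.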
  int nargs = method()->arg_size();
  kit.inc_sp(nargs);
  assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
  if (_reason == Deoptimization::Reason_class_check &&
      _action == Deoptimization::Action_maybe_recompile) {
    // Temp fix for 6529811
    // Don't allow uncommon_trap to override our decision to recompile in the event
    // of a class cast failure for a monomorphic call as it will never let us convert
    // the call to either bi-morphic or megamorphic and can lead to unc-trap loops
    bool keep_exact_action = true;
    kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
  } else {
    kit.uncommon_trap(_reason, _action);
  }
  return kit.transfer_exceptions_into_jvms();
}

// (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)

// (Note:  Merged hook_up_exits into ParseGenerator::generate.)

#define NODES_OVERHEAD_PER_METHOD (30.0)
#define NODES_PER_BYTECODE (9.5)

void WarmCallInfo::init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor) {
  int call_count = profile.count();
  int code_size = call_method->code_size();

  // Expected execution count is based on the historical count:
  _count = call_count < 0 ? 1 : call_site->method()->scale_count(call_count, prof_factor);

  // Expected profit from inlining, in units of simple call-overheads.
  _profit = 1.0;

  // Expected work performed by the call in units of call-overheads.
  // %%% need an empirical curve fit for "work" (time in call)
  float bytecodes_per_call = 3;
  _work = 1.0 + code_size / bytecodes_per_call;

  // Expected size of compilation graph:
  // -XX:+PrintParseStatistics once reported:
  //  Methods seen: 9184  Methods parsed: 9184  Nodes created: 1582391
  //  Histogram of 144298 parsed bytecodes:
  // %%% Need a better predictor for graph size.
  _size = NODES_OVERHEAD_PER_METHOD + (NODES_PER_BYTECODE * code_size);
}

// is_cold:  Return true if the node should never be inlined.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_cold() const {
  if (count()  <  WarmCallMinCount)   return true;
  if (profit() <  WarmCallMinProfit)  return true;
  if (work()   >  WarmCallMaxWork)    return true;
  if (size()   >  WarmCallMaxSize)    return true;
  return false;
}

// is_hot:  Return true if the node should be inlined immediately.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_hot() const {
  assert(!is_cold(), "eliminate is_cold cases before testing is_hot");
  if (count()  >= HotCallCountThreshold)   return true;
  if (profit() >= HotCallProfitThreshold)  return true;
  if (work()   <= HotCallTrivialWork)      return true;
  if (size()   <= HotCallTrivialSize)      return true;
  return false;
}

// compute_heat:
float WarmCallInfo::compute_heat() const {
  assert(!is_cold(), "compute heat only on warm nodes");
  assert(!is_hot(),  "compute heat only on warm nodes");
  int min_size = MAX2(0,   (int)HotCallTrivialSize);
  int max_size = MIN2(500, (int)WarmCallMaxSize);
  float method_size = (size() - min_size) / MAX2(1, max_size - min_size);
  float size_factor;
  if      (method_size < 0.05)  size_factor = 4;  // 2 sigmas better than avg.
  else if (method_size < 0.15)  size_factor = 2;  // 1 sigma better than avg.
  else if (method_size < 0.5)   size_factor = 1;    // better than avg.
  else                          size_factor = 0.5;  // worse than avg.
  return (count() * profit() * size_factor);
}

bool WarmCallInfo::warmer_than(WarmCallInfo* that) {
  assert(this != that, "compare only different WCIs");
  assert(this->heat() != 0 && that->heat() != 0, "call compute_heat 1st");
  if (this->heat() > that->heat())  return true;
  if (this->heat() < that->heat())  return false;
  assert(this->heat() == that->heat(), "no NaN heat allowed");
  // Equal heat.  Break the tie some other way.
  if (!this->call() || !that->call())  return (address)this > (address)that;
  return this->call()->_idx > that->call()->_idx;
}

//#define UNINIT_NEXT ((WarmCallInfo*)badAddress)
#define UNINIT_NEXT ((WarmCallInfo*)NULL)

WarmCallInfo* WarmCallInfo::insert_into(WarmCallInfo* head) {
  assert(next() == UNINIT_NEXT, "not yet on any list");
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != NULL && next_p->warmer_than(this)) {
    prev_p = next_p;
    next_p = prev_p->next();
  }
  // Install this between prev_p and next_p.
  this->set_next(next_p);
  if (prev_p == NULL)  head = this;
  else                 prev_p->set_next(this);
  return head;
}

WarmCallInfo* WarmCallInfo::remove_from(WarmCallInfo* head) {
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != this) {
    assert(next_p != NULL, "this must be in the list somewhere");
    prev_p = next_p;
    next_p = prev_p->next();
  }
  next_p = this->next();
  debug_only(this->set_next(UNINIT_NEXT));
  // Remove this from between prev_p and next_p.
  if (prev_p == NULL)  head = next_p;
  else                 prev_p->set_next(next_p);
  return head;
}

WarmCallInfo WarmCallInfo::_always_hot(WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE(),
                                       WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE());
WarmCallInfo WarmCallInfo::_always_cold(WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE(),
                                        WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE());

WarmCallInfo* WarmCallInfo::always_hot() {
  assert(_always_hot.is_hot(), "must always be hot");
  return &_always_hot;
}

WarmCallInfo* WarmCallInfo::always_cold() {
  assert(_always_cold.is_cold(), "must always be cold");
  return &_always_cold;
}


#ifndef PRODUCT

void WarmCallInfo::print() const {
  tty->print("%s : C=%6.1f P=%6.1f W=%6.1f S=%6.1f H=%6.1f -> %p",
             is_cold() ? "cold" : is_hot() ? "hot " : "warm",
             count(), profit(), work(), size(), compute_heat(), next());
  tty->cr();
  if (call() != NULL)  call()->dump();
}

void print_wci(WarmCallInfo* ci) {
  ci->print();
}

void WarmCallInfo::print_all() const {
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    p->print();
}

int WarmCallInfo::count_all() const {
  int cnt = 0;
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    cnt++;
  return cnt;
}

#endif //PRODUCT