/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"


// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
private:
  bool  _is_osr;
  float _expected_uses;

public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
  }

  virtual bool      is_parse() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
  int is_osr() { return _is_osr; }

};

JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  // Grab signature for matching/allocation
#ifdef ASSERT
  if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
    MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
    assert(C->env()->system_dictionary_modification_counter_changed(),
           "Must invalidate if TypeFuncs differ");
  }
#endif

  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
 private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool                _separate_io_proj;

 public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _separate_io_proj(separate_io_proj)
  {
  }
  virtual JVMState* generate(JVMState* jvms);

  CallStaticJavaNode* call_node() const { return _call_node; }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode *call = new (kit.C) CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
  _call_node = call;  // Save the call node in case we need it later
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_intrinsic() ||
        method()->is_compiled_lambda_form()) {
      call->set_method_handle_invoke(true);
    }
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
private:
  int _vtable_index;
public:
  VirtualCallGenerator(ciMethod* method, int vtable_index)
    : CallGenerator(method), _vtable_index(vtable_index)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it.  The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    kit.inc_sp(method()->arg_size());  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod *caller = kit.method();
  ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
       ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
       >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches ");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode *call = new (kit.C) CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call.  Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller.  Thus, this CallGenerator cannot be mixed with others!
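// The single argument is the OSR buffer carrying the interpreter frame
// state; see the is_osr() branch in ParseGenerator::generate above.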
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index);
}

// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
 protected:
  CallGenerator* _inline_cg;

  virtual bool do_late_inline_check(JVMState* jvms) { return true; }

 public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    DirectCallGenerator(method, true), _inline_cg(inline_cg) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();
    C->print_inlining_skip(this);

    // Record that this call site should be revisited once the main
    // parse is finished.
    if (!is_mh_late_inline()) {
      C->add_late_inline(this);
    }

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

  virtual void print_inlining_late(const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_insert(this);
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
  }

};

void LateInlineCallGenerator::do_late_inline() {
  // Can't inline it
  CallStaticJavaNode* call = call_node();
  if (call == NULL || call->outcnt() == 0 ||
      call->in(0) == NULL || call->in(0)->is_top()) {
    return;
  }

  const TypeTuple *r = call->tf()->domain();
  for (int i1 = 0; i1 < method()->arg_size(); i1++) {
    if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }

  Compile* C = Compile::current();
  // Remove inlined methods from Compiler's lists.
  if (call->is_macro()) {
    C->remove_macro_node(call);
  }

  // Make a clone of the JVMState that is appropriate to use for driving a parse
  JVMState* old_jvms = call->jvms();
  JVMState* jvms = old_jvms->clone_shallow(C);
  uint size = call->req();
  SafePointNode* map = new (C) SafePointNode(size, jvms);
  for (uint i1 = 0; i1 < size; i1++) {
    map->init_req(i1, call->in(i1));
  }

  // Make sure the state is a MergeMem for parsing.
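  // (The parser updates memory one alias slice at a time, so a flat memory
  // state has to be wrapped in a MergeMem before driving a parse.)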
  if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
    Node* mem = MergeMemNode::make(C, map->in(TypeFunc::Memory));
    C->initial_gvn()->set_type_bottom(mem);
    map->set_req(TypeFunc::Memory, mem);
  }

  uint nargs = method()->arg_size();
  // blow away old call arguments
  Node* top = C->top();
  for (uint i1 = 0; i1 < nargs; i1++) {
    map->set_req(TypeFunc::Parms + i1, top);
  }
  jvms->set_map(map);

  // Make enough space in the expression stack to transfer
  // the incoming arguments and return value.
  map->ensure_stack(jvms, jvms->method()->max_stack());
  for (uint i1 = 0; i1 < nargs; i1++) {
    map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
  }

  // This check is done here because for_method_handle_inline() method
  // needs jvms for inlined state.
  if (!do_late_inline_check(jvms)) {
    map->disconnect_inputs(NULL, C);
    return;
  }

  C->print_inlining_insert(this);

  CompileLog* log = C->log();
  if (log != NULL) {
    log->head("late_inline method='%d'", log->identify(method()));
    JVMState* p = jvms;
    while (p != NULL) {
      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
      p = p->caller();
    }
    log->tail("late_inline");
  }

  // Set up default node notes to be picked up by the inlining
  Node_Notes* old_nn = C->node_notes_at(call->_idx);
  if (old_nn != NULL) {
    Node_Notes* entry_nn = old_nn->clone(C);
    entry_nn->set_jvms(jvms);
    C->set_default_node_notes(entry_nn);
  }

  // Now perform the inlining using the synthesized JVMState
  JVMState* new_jvms = _inline_cg->generate(jvms);
  if (new_jvms == NULL)  return;  // no change
  if (C->failing())      return;

  // Capture any exceptional control flow
  GraphKit kit(new_jvms);

  // Find the result object
  Node* result = C->top();
  int   result_size = method()->return_type()->size();
  if (result_size != 0 && !kit.stopped()) {
    result = (result_size == 1) ? kit.pop() : kit.pop_pair();
  }

  C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
  C->env()->notice_inlined_method(_inline_cg->method());
  C->set_inlining_progress(true);

  kit.replace_call(call, result, true);
}


CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}

class LateInlineMHCallGenerator : public LateInlineCallGenerator {
  ciMethod* _caller;
  int _attempt;
  bool _input_not_const;

  virtual bool do_late_inline_check(JVMState* jvms);
  virtual bool already_attempted() const { return _attempt > 0; }

 public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}

  virtual bool is_mh_late_inline() const { return true; }

  virtual JVMState* generate(JVMState* jvms) {
    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);
    if (_input_not_const) {
      // inlining won't be possible so no need to enqueue right now.
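      // Linking the call node back to this generator lets a later pass of
      // incremental inlining retry once the MethodHandle input becomes
      // constant (see do_late_inline_check below).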
      call_node()->set_generator(this);
    } else {
      Compile::current()->add_late_inline(this);
    }
    return new_jvms;
  }

  virtual void print_inlining_late(const char* msg) {
    if (!_input_not_const) return;
    LateInlineCallGenerator::print_inlining_late(msg);
  }
};

bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {

  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);

  if (!_input_not_const) {
    _attempt++;
  }

  if (cg != NULL) {
    assert(!cg->is_late_inline() && cg->is_inline(), "we're doing late inlining");
    _inline_cg = cg;
    Compile::current()->dec_number_of_mh_late_inlines();
    return true;
  }

  call_node()->set_generator(this);
  return false;
}

CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  Compile::current()->inc_number_of_mh_late_inlines();
  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
  return cg;
}

class LateInlineStringCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();
    C->print_inlining_skip(this);

    C->add_string_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_string_late_inline() const { return true; }
};

CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineStringCallGenerator(method, inline_cg);
}

class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();
    C->print_inlining_skip(this);

    C->add_boxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }
};

CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineBoxingCallGenerator(method, inline_cg);
}

//---------------------------WarmCallGenerator--------------------------------
// Internal class which handles initial deferral of inlining decisions.
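// A warm call is first emitted out-of-line via _if_cold; generate() then
// queues a WarmCallInfo (scored by compute_heat() below) on
// Compile::warm_calls(), from which the call may later be upgraded to the
// _if_hot (inlining) generator.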
class WarmCallGenerator : public CallGenerator {
  WarmCallInfo*   _call_info;
  CallGenerator*  _if_cold;
  CallGenerator*  _if_hot;
  bool            _is_virtual;   // caches virtuality of if_cold
  bool            _is_inline;    // caches inline-ness of if_hot

 public:
  WarmCallGenerator(WarmCallInfo* ci,
                    CallGenerator* if_cold,
                    CallGenerator* if_hot)
    : CallGenerator(if_cold->method())
  {
    assert(method() == if_hot->method(), "consistent choices");
    _call_info  = ci;
    _if_cold    = if_cold;
    _if_hot     = if_hot;
    _is_virtual = if_cold->is_virtual();
    _is_inline  = if_hot->is_inline();
  }

  virtual bool      is_inline() const   { return _is_inline; }
  virtual bool      is_virtual() const  { return _is_virtual; }
  virtual bool      is_deferred() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
                                            CallGenerator* if_cold,
                                            CallGenerator* if_hot) {
  return new WarmCallGenerator(ci, if_cold, if_hot);
}

JVMState* WarmCallGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  if (C->log() != NULL) {
    C->log()->elem("warm_call bci='%d'", jvms->bci());
  }
  jvms = _if_cold->generate(jvms);
  if (jvms != NULL) {
    Node* m = jvms->map()->control();
    if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
    if (m->is_Catch())     m = m->in(0);  else m = C->top();
    if (m->is_Proj())      m = m->in(0);  else m = C->top();
    if (m->is_CallJava()) {
      _call_info->set_call(m->as_Call());
      _call_info->set_hot_cg(_if_hot);
#ifndef PRODUCT
      if (PrintOpto || PrintOptoInlining) {
        tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
        tty->print("WCI: ");
        _call_info->print();
      }
#endif
      _call_info->set_heat(_call_info->compute_heat());
      C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
    }
  }
  return jvms;
}

void WarmCallInfo::make_hot() {
  Unimplemented();
}

void WarmCallInfo::make_cold() {
  // No action:  Just dequeue.
}


//------------------------PredictedCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;

 public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove the extreme values from the range.
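    // (Clamping to PROB_MIN/PROB_MAX keeps the branch frequency strictly
    // inside (0,1), so the receiver type check below never degenerates
    // into an always- or never-taken branch.)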
    if (hit_prob > PROB_MAX)  hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN)  hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
  }

  virtual bool      is_virtual() const  { return true; }
  virtual bool      is_inline() const   { return _if_hit->is_inline(); }
  virtual bool      is_deferred() const { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit, hit_prob);
}


JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_call bci='%d' klass='%d'",
              jvms->bci(), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver_before_call(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  // Make a copy of the replaced nodes in case we need to restore them
  ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
  replaced_nodes.clone();

  Node* exact_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = kit.type_check_receiver(receiver,
                                           _predicted_receiver, _hit_prob,
                                           &exact_receiver);

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms = NULL;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      if (kit.failing())
        return NULL;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != NULL, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // Instance does not exactly match the desired type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // fall through if the instance exactly matches the desired type
  kit.replace_in_map(receiver, exact_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // There are 2 branches and the replaced nodes are only valid on
  // one: restore the replaced nodes to what they were before the
  // branch.
  kit.map()->set_replaced_nodes(replaced_nodes);

  // Finish the diamond.
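  // Control, i/o and memory from the fast (hit) and slow (missed) paths
  // are rejoined through a two-way Region; a Phi is created for any
  // JVMState slot on which the two maps disagree.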
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new (kit.C) RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  // Merge memory
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool delayed_forbidden) {
  assert(callee->is_method_handle_intrinsic() ||
         callee->is_compiled_lambda_form(), "for_method_handle_call mismatch");
  bool input_not_const;
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, input_not_const);
  Compile* C = Compile::current();
  if (cg != NULL) {
    if (!delayed_forbidden && AlwaysIncrementalInline) {
      return CallGenerator::for_late_inline(callee, cg);
    } else {
      return cg;
    }
  }
  int bci = jvms->bci();
  ciCallProfile profile = caller->call_profile_at_bci(bci);
  int call_site_count = caller->scale_count(profile.count());

  if (IncrementalInline && call_site_count > 0 &&
      (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
  } else {
    // Out-of-line call.
    return CallGenerator::for_direct_call(callee);
  }
}

CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
        guarantee(!target->is_method_handle_intrinsic(), "should not happen");  // XXX remove
        const int vtable_index = Method::invalid_vtable_index;
        CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS, NULL, true, true);
        assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
        if (cg != NULL && cg->is_inline())
          return cg;
      }
    }
    break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      // Get MemberName argument:
      Node* member_name = kit.argument(callee->arg_size() - 1);
      if (member_name->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        // In lambda forms we erase signature types to avoid resolving issues
        // involving class loaders.  When we optimize a method handle invoke
        // to a direct call we must cast the receiver and arguments to their
        // actual types.
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          Node* arg = kit.argument(0);
          const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
          const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
          if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
            Node* cast_obj = gvn.transform(new (C) CheckCastPPNode(kit.control(), arg, sig_type));
            kit.set_argument(0, cast_obj);
          }
        }
        // Cast reference arguments to their types.
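        // (i walks the signature; j walks the argument slots, since long
        // and double occupy two slots each.)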
        for (int i = 0, j = 0; i < signature->count(); i++) {
          ciType* t = signature->type_at(i);
          if (t->is_klass()) {
            Node* arg = kit.argument(receiver_skip + j);
            const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
            const Type*       sig_type = TypeOopPtr::make_from_klass(t->as_klass());
            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
              Node* cast_obj = gvn.transform(new (C) CheckCastPPNode(kit.control(), arg, sig_type));
              kit.set_argument(receiver_skip + j, cast_obj);
            }
          }
          j += t->size();  // long and double take two slots
        }

        // Try to get the most accurate receiver type
        const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
        const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
        int  vtable_index       = Method::invalid_vtable_index;
        bool call_does_dispatch = false;

        ciKlass* speculative_receiver_type = NULL;
        if (is_virtual_or_interface) {
          ciInstanceKlass* klass = target->holder();
          Node*             receiver_node = kit.argument(0);
          const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
          // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
          // optimize_virtual_call() takes 2 different holder
          // arguments for a corner case that doesn't apply here (see
          // Parse::do_call())
          target = C->optimize_virtual_call(caller, jvms->bci(), klass, klass,
                                            target, receiver_type, is_virtual,
                                            call_does_dispatch, vtable_index,  // out-parameters
                                            /*check_access=*/false);
          // We lack profiling at this call but type speculation may
          // provide us with a type
          speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
        }

        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, true, PROB_ALWAYS, speculative_receiver_type, true, true);
        assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
        if (cg != NULL && cg->is_inline())
          return cg;
      }
    }
    break;

  default:
    fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
    break;
  }
  return NULL;
}


//------------------------PredicatedIntrinsicGenerator------------------------------
// Internal class which handles all predicated Intrinsic calls.
class PredicatedIntrinsicGenerator : public CallGenerator {
  CallGenerator* _intrinsic;
  CallGenerator* _cg;

 public:
  PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
                               CallGenerator* cg)
    : CallGenerator(cg->method())
  {
    _intrinsic = intrinsic;
    _cg        = cg;
  }

  virtual bool      is_virtual() const   { return true; }
  virtual bool      is_inlined() const   { return true; }
  virtual bool      is_intrinsic() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
                                                       CallGenerator* cg) {
  return new PredicatedIntrinsicGenerator(intrinsic, cg);
}


JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
  // The code we want to generate here is:
  //    if (receiver == NULL)
  //      uncommon_trap
  //    if (predicate(0))
  //      do_intrinsic(0)
  //    else
  //      if (predicate(1))
  //        do_intrinsic(1)
  //    ...
  //    else
  //      do_java_comp

  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicated_intrinsic bci='%d' method='%d'",
              jvms->bci(), log->identify(method()));
  }

  if (!method()->is_static()) {
    // We need an explicit receiver null_check before checking its type in predicate.
    // We share a map with the caller, so his JVMS gets adjusted.
    Node* receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      return kit.transfer_exceptions_into_jvms();
    }
  }

  int n_predicates = _intrinsic->predicates_count();
  assert(n_predicates > 0, "sanity");

  JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));

  // Region for normal compilation code if intrinsic failed.
  Node* slow_region = new (kit.C) RegionNode(1);

  int results = 0;
  for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
#ifdef ASSERT
    JVMState* old_jvms = kit.jvms();
    SafePointNode* old_map = kit.map();
    Node* old_io  = old_map->i_o();
    Node* old_mem = old_map->memory();
    Node* old_exc = old_map->next_exception();
#endif
    Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
#ifdef ASSERT
    // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
    assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
    SafePointNode* new_map = kit.map();
    assert(old_io  == new_map->i_o(), "generate_predicate should not change i_o");
    assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
    assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
#endif
    if (!kit.stopped()) {
      PreserveJVMState pjvms(&kit);
      // Generate intrinsic code:
      JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
      if (new_jvms == NULL) {
        // Intrinsic failed, use normal compilation path for this predicate.
        slow_region->add_req(kit.control());
      } else {
        kit.add_exception_states_from(new_jvms);
        kit.set_jvms(new_jvms);
        if (!kit.stopped()) {
          result_jvms[results++] = kit.jvms();
        }
      }
    }
    if (else_ctrl == NULL) {
      else_ctrl = kit.C->top();
    }
    kit.set_control(else_ctrl);
  }
  if (!kit.stopped()) {
    // Final 'else' after predicates.
    slow_region->add_req(kit.control());
  }
  if (slow_region->req() > 1) {
    PreserveJVMState pjvms(&kit);
    // Generate normal compilation code:
    kit.set_control(gvn.transform(slow_region));
    JVMState* new_jvms = _cg->generate(kit.sync_jvms());
    if (kit.failing())
      return NULL;  // might happen because of NodeCountInliningCutoff
    assert(new_jvms != NULL, "must be");
    kit.add_exception_states_from(new_jvms);
    kit.set_jvms(new_jvms);
    if (!kit.stopped()) {
      result_jvms[results++] = kit.jvms();
    }
  }

  if (results == 0) {
    // All paths ended in uncommon traps.
    (void) kit.stop();
    return kit.transfer_exceptions_into_jvms();
  }

  if (results == 1) { // Only one path
    kit.set_jvms(result_jvms[0]);
    return kit.transfer_exceptions_into_jvms();
  }

  // Merge all paths.
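  // Each surviving intrinsic path, plus the normal compilation path if one
  // was generated, feeds one input of a Region; i/o, memory and debug info
  // are then merged with Phis, as in PredictedCallGenerator above.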
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new (kit.C) RegionNode(results + 1);
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  for (int i = 0; i < results; i++) {
    JVMState* jvms = result_jvms[i];
    int path = i + 1;
    SafePointNode* map = jvms->map();
    region->init_req(path, map->control());
    iophi->set_req(path, map->i_o());
    if (i == 0) {
      kit.set_jvms(jvms);
    } else {
      kit.merge_memory(map->merged_memory(), region, path);
    }
  }
  kit.set_control(gvn.transform(region));
  kit.set_i_o(gvn.transform(iophi));
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }

  // Merge debug info.
  Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  Node* map = kit.map();
  uint limit = map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* n = map->in(i);
    ins[0] = n;
    const Type* t = gvn.type(n);
    bool needs_phi = false;
    for (int j = 1; j < results; j++) {
      JVMState* jvms = result_jvms[j];
      Node* jmap = jvms->map();
      Node* m = NULL;
      if (jmap->req() > i) {
        m = jmap->in(i);
        if (m != n) {
          needs_phi = true;
          t = t->meet_speculative(gvn.type(m));
        }
      }
      ins[j] = m;
    }
    if (needs_phi) {
      Node* phi = PhiNode::make(region, n, t);
      for (int j = 1; j < results; j++) {
        phi->set_req(j + 1, ins[j]);
      }
      map->set_req(i, gvn.transform(phi));
    }
  }

  return kit.transfer_exceptions_into_jvms();
}

//-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class UncommonTrapCallGenerator : public CallGenerator {
  Deoptimization::DeoptReason _reason;
  Deoptimization::DeoptAction _action;

 public:
  UncommonTrapCallGenerator(ciMethod* m,
                            Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action)
    : CallGenerator(m)
  {
    _reason = reason;
    _action = action;
  }

  virtual bool      is_virtual() const { ShouldNotReachHere(); return false; }
  virtual bool      is_trap() const    { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator*
CallGenerator::for_uncommon_trap(ciMethod* m,
                                 Deoptimization::DeoptReason reason,
                                 Deoptimization::DeoptAction action) {
  return new UncommonTrapCallGenerator(m, reason, action);
}


JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
  // Callsite signature can be different from actual method being called (i.e. _linkTo* sites).
  // Use callsite signature always.
  ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
  int nargs = declared_method->arg_size();
  kit.inc_sp(nargs);
  assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
  if (_reason == Deoptimization::Reason_class_check &&
      _action == Deoptimization::Action_maybe_recompile) {
    // Temp fix for 6529811
    // Don't allow uncommon_trap to override our decision to recompile in the event
    // of a class cast failure for a monomorphic call as it will never let us convert
    // the call to either bi-morphic or megamorphic and can lead to unc-trap loops
    bool keep_exact_action = true;
    kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
  } else {
    kit.uncommon_trap(_reason, _action);
  }
  return kit.transfer_exceptions_into_jvms();
}

// (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)

// (Note:  Merged hook_up_exits into ParseGenerator::generate.)

#define NODES_OVERHEAD_PER_METHOD (30.0)
#define NODES_PER_BYTECODE (9.5)

void WarmCallInfo::init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor) {
  int call_count = profile.count();
  int code_size = call_method->code_size();

  // Expected execution count is based on the historical count:
  _count = call_count < 0 ? 1 : call_site->method()->scale_count(call_count, prof_factor);

  // Expected profit from inlining, in units of simple call-overheads.
  _profit = 1.0;

  // Expected work performed by the call in units of call-overheads.
  // %%% need an empirical curve fit for "work" (time in call)
  float bytecodes_per_call = 3;
  _work = 1.0 + code_size / bytecodes_per_call;

  // Expected size of compilation graph:
  // -XX:+PrintParseStatistics once reported:
  //  Methods seen: 9184  Methods parsed: 9184  Nodes created: 1582391
  //  Histogram of 144298 parsed bytecodes:
  // %%% Need a better predictor for graph size.
  _size = NODES_OVERHEAD_PER_METHOD + (NODES_PER_BYTECODE * code_size);
}

// is_cold:  Return true if the node should never be inlined.
// This is true if any of the key metrics are extreme.
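// Calls that are neither cold nor hot land in the warm band and are
// ranked by compute_heat() below.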
bool WarmCallInfo::is_cold() const {
  if (count()  <  WarmCallMinCount)   return true;
  if (profit() <  WarmCallMinProfit)  return true;
  if (work()   >  WarmCallMaxWork)    return true;
  if (size()   >  WarmCallMaxSize)    return true;
  return false;
}

// is_hot:  Return true if the node should be inlined immediately.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_hot() const {
  assert(!is_cold(), "eliminate is_cold cases before testing is_hot");
  if (count()  >= HotCallCountThreshold)   return true;
  if (profit() >= HotCallProfitThreshold)  return true;
  if (work()   <= HotCallTrivialWork)      return true;
  if (size()   <= HotCallTrivialSize)      return true;
  return false;
}

// compute_heat:
float WarmCallInfo::compute_heat() const {
  assert(!is_cold(), "compute heat only on warm nodes");
  assert(!is_hot(), "compute heat only on warm nodes");
  int min_size = MAX2(0,   (int)HotCallTrivialSize);
  int max_size = MIN2(500, (int)WarmCallMaxSize);
  float method_size = (size() - min_size) / MAX2(1, max_size - min_size);
  float size_factor;
  if      (method_size < 0.05)  size_factor = 4;    // 2 sigmas better than avg.
  else if (method_size < 0.15)  size_factor = 2;    // 1 sigma better than avg.
  else if (method_size < 0.5)   size_factor = 1;    // better than avg.
  else                          size_factor = 0.5;  // worse than avg.
  return (count() * profit() * size_factor);
}

bool WarmCallInfo::warmer_than(WarmCallInfo* that) {
  assert(this != that, "compare only different WCIs");
  assert(this->heat() != 0 && that->heat() != 0, "call compute_heat first");
  if (this->heat() > that->heat())   return true;
  if (this->heat() < that->heat())   return false;
  assert(this->heat() == that->heat(), "no NaN heat allowed");
  // Equal heat.  Break the tie some other way.
  if (!this->call() || !that->call())  return (address)this > (address)that;
  return this->call()->_idx > that->call()->_idx;
}

//#define UNINIT_NEXT ((WarmCallInfo*)badAddress)
#define UNINIT_NEXT ((WarmCallInfo*)NULL)

WarmCallInfo* WarmCallInfo::insert_into(WarmCallInfo* head) {
  assert(next() == UNINIT_NEXT, "not yet on any list");
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != NULL && next_p->warmer_than(this)) {
    prev_p = next_p;
    next_p = prev_p->next();
  }
  // Install this between prev_p and next_p.
  this->set_next(next_p);
  if (prev_p == NULL)
    head = this;
  else
    prev_p->set_next(this);
  return head;
}

WarmCallInfo* WarmCallInfo::remove_from(WarmCallInfo* head) {
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != this) {
    assert(next_p != NULL, "this must be in the list somewhere");
    prev_p = next_p;
    next_p = prev_p->next();
  }
  next_p = this->next();
  debug_only(this->set_next(UNINIT_NEXT));
  // Remove this from between prev_p and next_p.
  if (prev_p == NULL)
    head = next_p;
  else
    prev_p->set_next(next_p);
  return head;
}

WarmCallInfo WarmCallInfo::_always_hot(WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE(),
                                       WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE());
WarmCallInfo WarmCallInfo::_always_cold(WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE(),
                                        WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE());

WarmCallInfo* WarmCallInfo::always_hot() {
  assert(_always_hot.is_hot(), "must always be hot");
  return &_always_hot;
}

WarmCallInfo* WarmCallInfo::always_cold() {
  assert(_always_cold.is_cold(), "must always be cold");
  return &_always_cold;
}


#ifndef PRODUCT

void WarmCallInfo::print() const {
  tty->print("%s : C=%6.1f P=%6.1f W=%6.1f S=%6.1f H=%6.1f -> %p",
             is_cold() ? "cold" : is_hot() ? "hot " : "warm",
             count(), profit(), work(), size(), compute_heat(), next());
  tty->cr();
  if (call() != NULL)  call()->dump();
}

void print_wci(WarmCallInfo* ci) {
  ci->print();
}

void WarmCallInfo::print_all() const {
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    p->print();
}

int WarmCallInfo::count_all() const {
  int cnt = 0;
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    cnt++;
  return cnt;
}

#endif //PRODUCT