/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/shenandoahSupport.hpp"
#include "opto/subnode.hpp"


// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
private:
  bool  _is_osr;
  float _expected_uses;

public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
  }

  virtual bool      is_parse() const           { return true; }
  virtual JVMState* generate(JVMState* jvms);
  int is_osr() { return _is_osr; }
};

JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  // Grab signature for matching/allocation
#ifdef ASSERT
  if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
    MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
    assert(C->env()->system_dictionary_modification_counter_changed(),
           "Must invalidate if TypeFuncs differ");
  }
#endif

  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
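  // (Exception states collected during parsing are appended here so the
  // caller can wire them into its own exception handling.)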
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
 private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool                _separate_io_proj;

 public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _separate_io_proj(separate_io_proj)
  {
  }
  virtual JVMState* generate(JVMState* jvms);

  CallStaticJavaNode* call_node() const { return _call_node; }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode *call = new (kit.C) CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
  _call_node = call;  // Save the call node in case we need it later
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_intrinsic() ||
        method()->is_compiled_lambda_form()) {
      call->set_method_handle_invoke(true);
    }
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
 private:
  int _vtable_index;
 public:
  VirtualCallGenerator(ciMethod* method, int vtable_index)
    : CallGenerator(method), _vtable_index(vtable_index)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const          { return true; }
  virtual JVMState* generate(JVMState* jvms);
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it.  The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
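  // Instead, emit an uncommon trap: on deoptimization the interpreter
  // re-executes the invoke and raises the NullPointerException itself.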
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    assert(Bytecodes::is_invoke(kit.java_bc()), err_msg("%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc())));
    ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
    int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
    kit.inc_sp(arg_size);  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod *caller = kit.method();
  ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
      ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
        >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches ");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode *call = new (kit.C) CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call.  Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller.  Thus, this CallGenerator cannot be mixed with others!
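// (An OSR compilation enters at a loop backedge, osr_bci, rather than at
// the method entry, so the "caller" state is the running frame itself.)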
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index);
}

// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
 protected:
  CallGenerator* _inline_cg;

  virtual bool do_late_inline_check(JVMState* jvms) { return true; }

 public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    DirectCallGenerator(method, true), _inline_cg(inline_cg) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();
    C->print_inlining_skip(this);

    // Record that this call site should be revisited once the main
    // parse is finished.
    if (!is_mh_late_inline()) {
      C->add_late_inline(this);
    }

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

  virtual void print_inlining_late(const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_insert(this);
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
  }
};

void LateInlineCallGenerator::do_late_inline() {
  // Can't inline it
  CallStaticJavaNode* call = call_node();
  if (call == NULL || call->outcnt() == 0 ||
      call->in(0) == NULL || call->in(0)->is_top()) {
    return;
  }

  const TypeTuple *r = call->tf()->domain();
  for (int i1 = 0; i1 < method()->arg_size(); i1++) {
    if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }

  Compile* C = Compile::current();
  // Remove inlined methods from Compiler's lists.
  if (call->is_macro()) {
    C->remove_macro_node(call);
  }

  // Make a clone of the JVMState that is appropriate to use for driving a parse
  JVMState* old_jvms = call->jvms();
  JVMState* jvms = old_jvms->clone_shallow(C);
  uint size = call->req();
  SafePointNode* map = new (C) SafePointNode(size, jvms);
  for (uint i1 = 0; i1 < size; i1++) {
    map->init_req(i1, call->in(i1));
  }

  // Make sure the state is a MergeMem for parsing.
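  // (The parser addresses memory by alias slice, which requires a MergeMem;
  // wrap a flat memory edge in one if necessary.)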
  if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
    Node* mem = MergeMemNode::make(C, map->in(TypeFunc::Memory));
    C->initial_gvn()->set_type_bottom(mem);
    map->set_req(TypeFunc::Memory, mem);
  }

  uint nargs = method()->arg_size();
  // blow away old call arguments
  Node* top = C->top();
  for (uint i1 = 0; i1 < nargs; i1++) {
    map->set_req(TypeFunc::Parms + i1, top);
  }
  jvms->set_map(map);

  // Make enough space in the expression stack to transfer
  // the incoming arguments and return value.
  map->ensure_stack(jvms, jvms->method()->max_stack());
  for (uint i1 = 0; i1 < nargs; i1++) {
    map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
  }

  // This check is done here because for_method_handle_inline() method
  // needs jvms for inlined state.
  if (!do_late_inline_check(jvms)) {
    map->disconnect_inputs(NULL, C);
    return;
  }

  C->print_inlining_insert(this);

  CompileLog* log = C->log();
  if (log != NULL) {
    log->head("late_inline method='%d'", log->identify(method()));
    JVMState* p = jvms;
    while (p != NULL) {
      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
      p = p->caller();
    }
    log->tail("late_inline");
  }

  // Setup default node notes to be picked up by the inlining
  Node_Notes* old_nn = C->node_notes_at(call->_idx);
  if (old_nn != NULL) {
    Node_Notes* entry_nn = old_nn->clone(C);
    entry_nn->set_jvms(jvms);
    C->set_default_node_notes(entry_nn);
  }

  // Now perform the inlining using the synthesized JVMState
  JVMState* new_jvms = _inline_cg->generate(jvms);
  if (new_jvms == NULL)  return;  // no change
  if (C->failing())      return;

  // Capture any exceptional control flow
  GraphKit kit(new_jvms);

  // Find the result object
  Node* result = C->top();
  int   result_size = method()->return_type()->size();
  if (result_size != 0 && !kit.stopped()) {
    result = (result_size == 1) ? kit.pop() : kit.pop_pair();
  }

  C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
  C->env()->notice_inlined_method(_inline_cg->method());
  C->set_inlining_progress(true);

  kit.replace_call(call, result, true);
}


CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}

class LateInlineMHCallGenerator : public LateInlineCallGenerator {
  ciMethod* _caller;
  int _attempt;
  bool _input_not_const;

  virtual bool do_late_inline_check(JVMState* jvms);
  virtual bool already_attempted() const { return _attempt > 0; }

 public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}

  virtual bool is_mh_late_inline() const { return true; }

  virtual JVMState* generate(JVMState* jvms) {
    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);
    if (_input_not_const) {
      // inlining won't be possible so no need to enqueue right now.
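      // Attaching this generator to the call node keeps it reachable, so the
      // inline can be retried if incremental inlining makes the input constant.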
      call_node()->set_generator(this);
    } else {
      Compile::current()->add_late_inline(this);
    }
    return new_jvms;
  }

  virtual void print_inlining_late(const char* msg) {
    if (!_input_not_const) return;
    LateInlineCallGenerator::print_inlining_late(msg);
  }
};

bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {

  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);

  if (!_input_not_const) {
    _attempt++;
  }

  if (cg != NULL) {
    assert(!cg->is_late_inline() && cg->is_inline(), "we're doing late inlining");
    _inline_cg = cg;
    Compile::current()->dec_number_of_mh_late_inlines();
    return true;
  }

  call_node()->set_generator(this);
  return false;
}

CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  Compile::current()->inc_number_of_mh_late_inlines();
  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
  return cg;
}

class LateInlineStringCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();
    C->print_inlining_skip(this);

    C->add_string_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_string_late_inline() const { return true; }
};

CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineStringCallGenerator(method, inline_cg);
}

class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();
    C->print_inlining_skip(this);

    C->add_boxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }
};

CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineBoxingCallGenerator(method, inline_cg);
}

//---------------------------WarmCallGenerator--------------------------------
// Internal class which handles initial deferral of inlining decisions.
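// It emits the cold (out-of-line) call up front and queues a WarmCallInfo
// so the site can later be upgraded to the hot (inline) version.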
class WarmCallGenerator : public CallGenerator {
  WarmCallInfo*   _call_info;
  CallGenerator*  _if_cold;
  CallGenerator*  _if_hot;
  bool            _is_virtual;   // caches virtuality of if_cold
  bool            _is_inline;    // caches inline-ness of if_hot

 public:
  WarmCallGenerator(WarmCallInfo* ci,
                    CallGenerator* if_cold,
                    CallGenerator* if_hot)
    : CallGenerator(if_cold->method())
  {
    assert(method() == if_hot->method(), "consistent choices");
    _call_info  = ci;
    _if_cold    = if_cold;
    _if_hot     = if_hot;
    _is_virtual = if_cold->is_virtual();
    _is_inline  = if_hot->is_inline();
  }

  virtual bool      is_inline() const           { return _is_inline; }
  virtual bool      is_virtual() const          { return _is_virtual; }
  virtual bool      is_deferred() const         { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
                                            CallGenerator* if_cold,
                                            CallGenerator* if_hot) {
  return new WarmCallGenerator(ci, if_cold, if_hot);
}

JVMState* WarmCallGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  if (C->log() != NULL) {
    C->log()->elem("warm_call bci='%d'", jvms->bci());
  }
  jvms = _if_cold->generate(jvms);
  if (jvms != NULL) {
    Node* m = jvms->map()->control();
    if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
    if (m->is_Catch())     m = m->in(0);  else m = C->top();
    if (m->is_Proj())      m = m->in(0);  else m = C->top();
    if (m->is_CallJava()) {
      _call_info->set_call(m->as_Call());
      _call_info->set_hot_cg(_if_hot);
#ifndef PRODUCT
      if (PrintOpto || PrintOptoInlining) {
        tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
        tty->print("WCI: ");
        _call_info->print();
      }
#endif
      _call_info->set_heat(_call_info->compute_heat());
      C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
    }
  }
  return jvms;
}

void WarmCallInfo::make_hot() {
  Unimplemented();
}

void WarmCallInfo::make_cold() {
  // No action:  Just dequeue.
}


//------------------------PredictedCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;

 public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove the extreme values from the range.
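    // (PROB_MAX and PROB_MIN are C2's canonical "almost always" / "almost
    // never" branch probabilities; clamping keeps the frequency well-formed.)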
    if (hit_prob > PROB_MAX)  hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN)  hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
  }

  virtual bool      is_virtual()   const    { return true; }
  virtual bool      is_inline()    const    { return _if_hit->is_inline(); }
  virtual bool      is_deferred()  const    { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit, hit_prob);
}


JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_call bci='%d' klass='%d'",
              jvms->bci(), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver_before_call(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  // Make a copy of the replaced nodes in case we need to restore them
  ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
  replaced_nodes.clone();

  Node* exact_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = kit.type_check_receiver(receiver,
                                           _predicted_receiver, _hit_prob,
                                           &exact_receiver);

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms = NULL;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      if (kit.failing())
        return NULL;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != NULL, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // The instance does not exactly match the desired type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // fall through if the instance exactly matches the desired type
  kit.replace_in_map(receiver, exact_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // There are 2 branches and the replaced nodes are only valid on
  // one: restore the replaced nodes to what they were before the
  // branch.
  kit.map()->set_replaced_nodes(replaced_nodes);

  // Finish the diamond.
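  // Merge the fast and slow paths: a 2-way Region for control, Phis for
  // i/o and for each differing debug-info slot, plus a memory merge below.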
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new (kit.C) RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  // Merge memory
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool delayed_forbidden) {
  assert(callee->is_method_handle_intrinsic() ||
         callee->is_compiled_lambda_form(), "for_method_handle_call mismatch");
  bool input_not_const;
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, input_not_const);
  Compile* C = Compile::current();
  if (cg != NULL) {
    if (!delayed_forbidden && AlwaysIncrementalInline) {
      return CallGenerator::for_late_inline(callee, cg);
    } else {
      return cg;
    }
  }
  int bci = jvms->bci();
  ciCallProfile profile = caller->call_profile_at_bci(bci);
  int call_site_count = caller->scale_count(profile.count());

  if (IncrementalInline && call_site_count > 0 &&
      (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
  } else {
    // Out-of-line call.
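    // (Either the MH/MemberName input is not constant or the resolved target
    // was not deemed inlinable, so emit a plain out-of-line call.)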
    return CallGenerator::for_direct_call(callee);
  }
}

CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      assert(!(ShenandoahBarrierNode::skip_through_barrier(receiver)->is_Con() && !receiver->is_Con()), "barrier prevents optimization");
      if (receiver->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
        guarantee(!target->is_method_handle_intrinsic(), "should not happen");  // XXX remove
        const int vtable_index = Method::invalid_vtable_index;
        CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS, NULL, true, true);
        assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
        if (cg != NULL && cg->is_inline())
          return cg;
      }
    }
    break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      // Get MemberName argument:
      Node* member_name = kit.argument(callee->arg_size() - 1);
      assert(!(ShenandoahBarrierNode::skip_through_barrier(member_name)->is_Con() && !member_name->is_Con()), "barrier prevents optimization");
      if (member_name->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        // In lambda forms we erase signature types to avoid resolving issues
        // involving class loaders.  When we optimize a method handle invoke
        // to a direct call we must cast the receiver and arguments to their
        // actual types.
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          Node* arg = kit.argument(0);
          const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
          const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
          if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
            Node* cast_obj = gvn.transform(new (C) CheckCastPPNode(kit.control(), arg, sig_type));
            kit.set_argument(0, cast_obj);
          }
        }
        // Cast reference arguments to their types.
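        // (i indexes signature entries while j tracks JVM argument slots;
        // the two diverge after a long or double.)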
        for (int i = 0, j = 0; i < signature->count(); i++) {
          ciType* t = signature->type_at(i);
          if (t->is_klass()) {
            Node* arg = kit.argument(receiver_skip + j);
            const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
            const Type*       sig_type = TypeOopPtr::make_from_klass(t->as_klass());
            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
              Node* cast_obj = gvn.transform(new (C) CheckCastPPNode(kit.control(), arg, sig_type));
              kit.set_argument(receiver_skip + j, cast_obj);
            }
          }
          j += t->size();  // long and double take two slots
        }

        // Try to get the most accurate receiver type
        const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
        const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
        int  vtable_index       = Method::invalid_vtable_index;
        bool call_does_dispatch = false;

        ciKlass* speculative_receiver_type = NULL;
        if (is_virtual_or_interface) {
          ciInstanceKlass* klass = target->holder();
          Node*             receiver_node = kit.argument(0);
          const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
          // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
          // optimize_virtual_call() takes 2 different holder
          // arguments for a corner case that doesn't apply here (see
          // Parse::do_call())
          target = C->optimize_virtual_call(caller, jvms->bci(), klass, klass,
                                            target, receiver_type, is_virtual,
                                            call_does_dispatch, vtable_index,  // out-parameters
                                            /*check_access=*/false);
          // We lack profiling at this call but type speculation may
          // provide us with a type
          speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
        }

        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, true, PROB_ALWAYS, speculative_receiver_type, true, true);
        assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
        if (cg != NULL && cg->is_inline())
          return cg;
      }
    }
    break;

  default:
    fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
    break;
  }
  return NULL;
}


//------------------------PredicatedIntrinsicGenerator------------------------------
// Internal class which handles all predicated Intrinsic calls.
class PredicatedIntrinsicGenerator : public CallGenerator {
  CallGenerator* _intrinsic;
  CallGenerator* _cg;

 public:
  PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
                               CallGenerator* cg)
    : CallGenerator(cg->method())
  {
    _intrinsic = intrinsic;
    _cg        = cg;
  }

  virtual bool      is_virtual()   const    { return true; }
  virtual bool      is_inlined()   const    { return true; }
  virtual bool      is_intrinsic() const    { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
                                                       CallGenerator* cg) {
  return new PredicatedIntrinsicGenerator(intrinsic, cg);
}


JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
  // The code we want to generate here is:
  //    if (receiver == NULL)
  //      uncommon_trap
  //    if (predicate(0))
  //      do_intrinsic(0)
  //    else
  //      if (predicate(1))
  //        do_intrinsic(1)
  //    ...
  //    else
  //      do_java_comp

  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicated_intrinsic bci='%d' method='%d'",
              jvms->bci(), log->identify(method()));
  }

  if (!method()->is_static()) {
    // We need an explicit receiver null_check before checking its type in predicate.
    // We share a map with the caller, so his JVMS gets adjusted.
    Node* receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      return kit.transfer_exceptions_into_jvms();
    }
  }

  int n_predicates = _intrinsic->predicates_count();
  assert(n_predicates > 0, "sanity");

  JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));

  // Region for normal compilation code if intrinsic failed.
  Node* slow_region = new (kit.C) RegionNode(1);

  int results = 0;
  for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
#ifdef ASSERT
    JVMState* old_jvms = kit.jvms();
    SafePointNode* old_map = kit.map();
    Node* old_io  = old_map->i_o();
    Node* old_mem = old_map->memory();
    Node* old_exc = old_map->next_exception();
#endif
    Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
#ifdef ASSERT
    // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
    assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
    SafePointNode* new_map = kit.map();
    assert(old_io  == new_map->i_o(), "generate_predicate should not change i_o");
    assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
    assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
#endif
    if (!kit.stopped()) {
      PreserveJVMState pjvms(&kit);
      // Generate intrinsic code:
      JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
      if (new_jvms == NULL) {
        // Intrinsic failed, use normal compilation path for this predicate.
        slow_region->add_req(kit.control());
      } else {
        kit.add_exception_states_from(new_jvms);
        kit.set_jvms(new_jvms);
        if (!kit.stopped()) {
          result_jvms[results++] = kit.jvms();
        }
      }
    }
    if (else_ctrl == NULL) {
      else_ctrl = kit.C->top();
    }
    kit.set_control(else_ctrl);
  }
  if (!kit.stopped()) {
    // Final 'else' after predicates.
    slow_region->add_req(kit.control());
  }
  if (slow_region->req() > 1) {
    PreserveJVMState pjvms(&kit);
    // Generate normal compilation code:
    kit.set_control(gvn.transform(slow_region));
    JVMState* new_jvms = _cg->generate(kit.sync_jvms());
    if (kit.failing())
      return NULL;  // might happen because of NodeCountInliningCutoff
    assert(new_jvms != NULL, "must be");
    kit.add_exception_states_from(new_jvms);
    kit.set_jvms(new_jvms);
    if (!kit.stopped()) {
      result_jvms[results++] = kit.jvms();
    }
  }

  if (results == 0) {
    // All paths ended in uncommon traps.
    (void) kit.stop();
    return kit.transfer_exceptions_into_jvms();
  }

  if (results == 1) { // Only one path
    kit.set_jvms(result_jvms[0]);
    return kit.transfer_exceptions_into_jvms();
  }

  // Merge all paths.
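  // Same diamond-merge scheme as PredictedCallGenerator::generate(), but
  // generalized to N surviving paths: one Region input per result state.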
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new (kit.C) RegionNode(results + 1);
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  for (int i = 0; i < results; i++) {
    JVMState* jvms = result_jvms[i];
    int path = i + 1;
    SafePointNode* map = jvms->map();
    region->init_req(path, map->control());
    iophi->set_req(path, map->i_o());
    if (i == 0) {
      kit.set_jvms(jvms);
    } else {
      kit.merge_memory(map->merged_memory(), region, path);
    }
  }
  kit.set_control(gvn.transform(region));
  kit.set_i_o(gvn.transform(iophi));
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }

  // Merge debug info.
  Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  Node* map = kit.map();
  uint limit = map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* n = map->in(i);
    ins[0] = n;
    const Type* t = gvn.type(n);
    bool needs_phi = false;
    for (int j = 1; j < results; j++) {
      JVMState* jvms = result_jvms[j];
      Node* jmap = jvms->map();
      Node* m = NULL;
      if (jmap->req() > i) {
        m = jmap->in(i);
        if (m != n) {
          needs_phi = true;
          t = t->meet_speculative(gvn.type(m));
        }
      }
      ins[j] = m;
    }
    if (needs_phi) {
      Node* phi = PhiNode::make(region, n, t);
      for (int j = 1; j < results; j++) {
        phi->set_req(j + 1, ins[j]);
      }
      map->set_req(i, gvn.transform(phi));
    }
  }

  return kit.transfer_exceptions_into_jvms();
}

//-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which handles calls that are replaced by an uncommon trap.
class UncommonTrapCallGenerator : public CallGenerator {
  Deoptimization::DeoptReason _reason;
  Deoptimization::DeoptAction _action;

 public:
  UncommonTrapCallGenerator(ciMethod* m,
                            Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action)
    : CallGenerator(m)
  {
    _reason = reason;
    _action = action;
  }

  virtual bool      is_virtual() const          { ShouldNotReachHere(); return false; }
  virtual bool      is_trap() const             { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator*
CallGenerator::for_uncommon_trap(ciMethod* m,
                                 Deoptimization::DeoptReason reason,
                                 Deoptimization::DeoptAction action) {
  return new UncommonTrapCallGenerator(m, reason, action);
}


JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
  // Callsite signature can be different from actual method being called (i.e. _linkTo* sites).
  // Use callsite signature always.
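  // (Restoring sp with the arguments pushed lets the deopt state describe a
  // re-executable invoke bytecode for the interpreter.)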
  ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
  int nargs = declared_method->arg_size();
  kit.inc_sp(nargs);
  assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
  if (_reason == Deoptimization::Reason_class_check &&
      _action == Deoptimization::Action_maybe_recompile) {
    // Temp fix for 6529811
    // Don't allow uncommon_trap to override our decision to recompile in the event
    // of a class cast failure for a monomorphic call as it will never let us convert
    // the call to either bi-morphic or megamorphic and can lead to unc-trap loops
    bool keep_exact_action = true;
    kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
  } else {
    kit.uncommon_trap(_reason, _action);
  }
  return kit.transfer_exceptions_into_jvms();
}

// (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)

// (Note:  Merged hook_up_exits into ParseGenerator::generate.)

#define NODES_OVERHEAD_PER_METHOD (30.0)
#define NODES_PER_BYTECODE (9.5)

void WarmCallInfo::init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor) {
  int call_count = profile.count();
  int code_size = call_method->code_size();

  // Expected execution count is based on the historical count:
  _count = call_count < 0 ? 1 : call_site->method()->scale_count(call_count, prof_factor);

  // Expected profit from inlining, in units of simple call-overheads.
  _profit = 1.0;

  // Expected work performed by the call in units of call-overheads.
  // %%% need an empirical curve fit for "work" (time in call)
  float bytecodes_per_call = 3;
  _work = 1.0 + code_size / bytecodes_per_call;

  // Expected size of compilation graph:
  // -XX:+PrintParseStatistics once reported:
  //  Methods seen: 9184  Methods parsed: 9184  Nodes created: 1582391
  //  Histogram of 144298 parsed bytecodes:
  // %%% Need a better predictor for graph size.
  _size = NODES_OVERHEAD_PER_METHOD + (NODES_PER_BYTECODE * code_size);
}

// is_cold:  Return true if the node should never be inlined.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_cold() const {
  if (count()  < WarmCallMinCount)   return true;
  if (profit() < WarmCallMinProfit)  return true;
  if (work()   > WarmCallMaxWork)    return true;
  if (size()   > WarmCallMaxSize)    return true;
  return false;
}

// is_hot:  Return true if the node should be inlined immediately.
// This is true if any of the key metrics are extreme.
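// (HotCallCountThreshold and friends are C2 tuning flags; calls that are
// neither hot nor cold remain "warm" and are ranked by compute_heat().)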
bool WarmCallInfo::is_hot() const {
  assert(!is_cold(), "eliminate is_cold cases before testing is_hot");
  if (count()  >= HotCallCountThreshold)   return true;
  if (profit() >= HotCallProfitThreshold)  return true;
  if (work()   <= HotCallTrivialWork)      return true;
  if (size()   <= HotCallTrivialSize)      return true;
  return false;
}

// compute_heat:
float WarmCallInfo::compute_heat() const {
  assert(!is_cold(), "compute heat only on warm nodes");
  assert(!is_hot(),  "compute heat only on warm nodes");
  int min_size = MAX2(0,   (int)HotCallTrivialSize);
  int max_size = MIN2(500, (int)WarmCallMaxSize);
  float method_size = (size() - min_size) / MAX2(1, max_size - min_size);
  float size_factor;
  if      (method_size < 0.05)  size_factor = 4;    // 2 sigmas better than avg.
  else if (method_size < 0.15)  size_factor = 2;    // 1 sigma better than avg.
  else if (method_size < 0.5)   size_factor = 1;    // better than avg.
  else                          size_factor = 0.5;  // worse than avg.
  return (count() * profit() * size_factor);
}

bool WarmCallInfo::warmer_than(WarmCallInfo* that) {
  assert(this != that, "compare only different WCIs");
  assert(this->heat() != 0 && that->heat() != 0, "call compute_heat 1st");
  if (this->heat() > that->heat())  return true;
  if (this->heat() < that->heat())  return false;
  assert(this->heat() == that->heat(), "no NaN heat allowed");
  // Equal heat.  Break the tie some other way.
  if (!this->call() || !that->call())  return (address)this > (address)that;
  return this->call()->_idx > that->call()->_idx;
}

//#define UNINIT_NEXT ((WarmCallInfo*)badAddress)
#define UNINIT_NEXT ((WarmCallInfo*)NULL)

WarmCallInfo* WarmCallInfo::insert_into(WarmCallInfo* head) {
  assert(next() == UNINIT_NEXT, "not yet on any list");
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != NULL && next_p->warmer_than(this)) {
    prev_p = next_p;
    next_p = prev_p->next();
  }
  // Install this between prev_p and next_p.
  this->set_next(next_p);
  if (prev_p == NULL)
    head = this;
  else
    prev_p->set_next(this);
  return head;
}

WarmCallInfo* WarmCallInfo::remove_from(WarmCallInfo* head) {
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != this) {
    assert(next_p != NULL, "this must be in the list somewhere");
    prev_p = next_p;
    next_p = prev_p->next();
  }
  next_p = this->next();
  debug_only(this->set_next(UNINIT_NEXT));
  // Remove this from between prev_p and next_p.
  if (prev_p == NULL)
    head = next_p;
  else
    prev_p->set_next(next_p);
  return head;
}

WarmCallInfo WarmCallInfo::_always_hot(WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE(),
                                       WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE());
WarmCallInfo WarmCallInfo::_always_cold(WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE(),
                                        WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE());

WarmCallInfo* WarmCallInfo::always_hot() {
  assert(_always_hot.is_hot(), "must always be hot");
  return &_always_hot;
}

WarmCallInfo* WarmCallInfo::always_cold() {
  assert(_always_cold.is_cold(), "must always be cold");
  return &_always_cold;
}


#ifndef PRODUCT

void WarmCallInfo::print() const {
  tty->print("%s : C=%6.1f P=%6.1f W=%6.1f S=%6.1f H=%6.1f -> %p",
             is_cold() ? "cold" : is_hot() ? "hot " : "warm",
             count(), profit(), work(), size(), compute_heat(), next());
  tty->cr();
  if (call() != NULL)  call()->dump();
}

void print_wci(WarmCallInfo* ci) {
  ci->print();
}

void WarmCallInfo::print_all() const {
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    p->print();
}

int WarmCallInfo::count_all() const {
  int cnt = 0;
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    cnt++;
  return cnt;
}

#endif //PRODUCT