/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"


// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
 private:
  bool  _is_osr;
  float _expected_uses;

 public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
  }

  virtual bool      is_parse() const           { return true; }
  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
  int is_osr() { return _is_osr; }
};

JVMState* ParseGenerator::generate(JVMState* jvms, Parse* parent_parser) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses, parent_parser);
  // Grab signature for matching/allocation
#ifdef ASSERT
  if (parser.tf() != (parser.depth() == 1 ?
                      C->tf() : tf())) {
    MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
    assert(C->env()->system_dictionary_modification_counter_changed(),
           "Must invalidate if TypeFuncs differ");
  }
#endif

  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
 private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool                _separate_io_proj;

 public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _separate_io_proj(separate_io_proj)
  {
  }
  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);

  CallStaticJavaNode* call_node() const { return _call_node; }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode* call = new CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
  _call_node = call;  // Save the call node in case we need it later
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_intrinsic() ||
        method()->is_compiled_lambda_form()) {
      call->set_method_handle_invoke(true);
    }
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
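// _vtable_index is Method::invalid_vtable_index when the call dispatches
// through an inline cache; a non-negative index selects a true vtable-stub
// dispatch, which is only used when inline caches are disabled (see the
// UseInlineCaches assert in generate() below).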
class VirtualCallGenerator : public CallGenerator {
 private:
  int _vtable_index;
 public:
  VirtualCallGenerator(ciMethod* method, int vtable_index)
    : CallGenerator(method), _vtable_index(vtable_index)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const { return true; }
  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);

  kit.C->print_inlining_update(this);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it.  The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    kit.inc_sp(method()->arg_size());  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod*     caller    = kit.method();
  ciMethodData* caller_md = (caller == NULL) ? NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
      ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
        >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode* call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call.  Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller.  Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index);
}

// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
 private:
  // unique id for log compilation
  jlong _unique_id;

 protected:
  CallGenerator* _inline_cg;
  virtual bool do_late_inline_check(JVMState* jvms) { return true; }

 public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    DirectCallGenerator(method, true), _inline_cg(inline_cg), _unique_id(0) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    // Record that this call site should be revisited once the main
    // parse is finished.
    if (!is_mh_late_inline()) {
      C->add_late_inline(this);
    }

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms, parent_parser);
  }

  virtual void print_inlining_late(const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_assert_ready();
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
    C->print_inlining_move_to(this);
    C->print_inlining_update_delayed(this);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }
};

void LateInlineCallGenerator::do_late_inline() {
  // Can't inline it
  CallStaticJavaNode* call = call_node();
  if (call == NULL || call->outcnt() == 0 ||
      call->in(0) == NULL || call->in(0)->is_top()) {
    return;
  }

  const TypeTuple* r = call->tf()->domain();
  for (int i1 = 0; i1 < method()->arg_size(); i1++) {
    if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }

  Compile* C = Compile::current();
  // Remove inlined methods from Compiler's lists.
  if (call->is_macro()) {
    C->remove_macro_node(call);
  }

  // Make a clone of the JVMState that is appropriate for driving a parse
  JVMState* old_jvms = call->jvms();
  JVMState* jvms = old_jvms->clone_shallow(C);
  uint size = call->req();
  SafePointNode* map = new SafePointNode(size, jvms);
  for (uint i1 = 0; i1 < size; i1++) {
    map->init_req(i1, call->in(i1));
  }

  // Make sure the state is a MergeMem for parsing.
  if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
    Node* mem = MergeMemNode::make(C, map->in(TypeFunc::Memory));
    C->initial_gvn()->set_type_bottom(mem);
    map->set_req(TypeFunc::Memory, mem);
  }

  uint nargs = method()->arg_size();
  // blow away old call arguments
  Node* top = C->top();
  for (uint i1 = 0; i1 < nargs; i1++) {
    map->set_req(TypeFunc::Parms + i1, top);
  }
  jvms->set_map(map);

  // Make enough space in the expression stack to transfer
  // the incoming arguments and return value.
  map->ensure_stack(jvms, jvms->method()->max_stack());
  for (uint i1 = 0; i1 < nargs; i1++) {
    map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
  }

  C->print_inlining_assert_ready();

  C->print_inlining_move_to(this);

  C->log_late_inline(this);

  // This check is done here because for_method_handle_inline() needs
  // jvms for the inlined state.
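  // If the late-inline check fails, disconnect the cloned map's inputs so
  // the nodes copied from the call do not remain artificially live.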
  if (!do_late_inline_check(jvms)) {
    map->disconnect_inputs(NULL, C);
    return;
  }

  // Set up default node notes to be picked up by the inlining
  Node_Notes* old_nn = C->node_notes_at(call->_idx);
  if (old_nn != NULL) {
    Node_Notes* entry_nn = old_nn->clone(C);
    entry_nn->set_jvms(jvms);
    C->set_default_node_notes(entry_nn);
  }

  // Now perform the inlining using the synthesized JVMState
  JVMState* new_jvms = _inline_cg->generate(jvms, NULL);
  if (new_jvms == NULL)  return;  // no change
  if (C->failing())      return;

  // Capture any exceptional control flow
  GraphKit kit(new_jvms);

  // Find the result object
  Node* result = C->top();
  int result_size = method()->return_type()->size();
  if (result_size != 0 && !kit.stopped()) {
    result = (result_size == 1) ? kit.pop() : kit.pop_pair();
  }

  C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
  C->env()->notice_inlined_method(_inline_cg->method());
  C->set_inlining_progress(true);

  kit.replace_call(call, result);
}


CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}

class LateInlineMHCallGenerator : public LateInlineCallGenerator {
  ciMethod* _caller;
  int       _attempt;
  bool      _input_not_const;

  virtual bool do_late_inline_check(JVMState* jvms);
  virtual bool already_attempted() const { return _attempt > 0; }

 public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}

  virtual bool is_mh_late_inline() const { return true; }

  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms, parent_parser);

    Compile* C = Compile::current();
    if (_input_not_const) {
      // Inlining won't be possible, so there is no need to enqueue right now.
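      // Attach this generator to the call node so a later incremental
      // inlining pass can retry once the MethodHandle input is known.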
      call_node()->set_generator(this);
    } else {
      C->add_late_inline(this);
    }
    return new_jvms;
  }
};

bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {
  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);

  Compile::current()->print_inlining_update_delayed(this);

  if (!_input_not_const) {
    _attempt++;
  }

  if (cg != NULL) {
    assert(!cg->is_late_inline() && cg->is_inline(), "we're doing late inlining");
    _inline_cg = cg;
    Compile::current()->dec_number_of_mh_late_inlines();
    return true;
  }

  call_node()->set_generator(this);
  return false;
}

CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  Compile::current()->inc_number_of_mh_late_inlines();
  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
  return cg;
}

class LateInlineStringCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    C->add_string_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms, parent_parser);
    return new_jvms;
  }

  virtual bool is_string_late_inline() const { return true; }
};

CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineStringCallGenerator(method, inline_cg);
}

class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    C->add_boxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms, parent_parser);
    return new_jvms;
  }
};

CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineBoxingCallGenerator(method, inline_cg);
}

//---------------------------WarmCallGenerator--------------------------------
// Internal class which handles initial deferral of inlining decisions.
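// The call is first emitted out of line via _if_cold; generate() then records
// a WarmCallInfo on the compile's warm-call list so the site can later be
// upgraded to the inlined _if_hot version if its heat warrants it.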
class WarmCallGenerator : public CallGenerator {
  WarmCallInfo*  _call_info;
  CallGenerator* _if_cold;
  CallGenerator* _if_hot;
  bool           _is_virtual;  // caches virtuality of if_cold
  bool           _is_inline;   // caches inline-ness of if_hot

 public:
  WarmCallGenerator(WarmCallInfo* ci,
                    CallGenerator* if_cold,
                    CallGenerator* if_hot)
    : CallGenerator(if_cold->method())
  {
    assert(method() == if_hot->method(), "consistent choices");
    _call_info  = ci;
    _if_cold    = if_cold;
    _if_hot     = if_hot;
    _is_virtual = if_cold->is_virtual();
    _is_inline  = if_hot->is_inline();
  }

  virtual bool      is_inline() const   { return _is_inline; }
  virtual bool      is_virtual() const  { return _is_virtual; }
  virtual bool      is_deferred() const { return true; }

  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
};


CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
                                            CallGenerator* if_cold,
                                            CallGenerator* if_hot) {
  return new WarmCallGenerator(ci, if_cold, if_hot);
}

JVMState* WarmCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (C->log() != NULL) {
    C->log()->elem("warm_call bci='%d'", jvms->bci());
  }
  jvms = _if_cold->generate(jvms, parent_parser);
  if (jvms != NULL) {
    Node* m = jvms->map()->control();
    if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
    if (m->is_Catch())     m = m->in(0);  else m = C->top();
    if (m->is_Proj())      m = m->in(0);  else m = C->top();
    if (m->is_CallJava()) {
      _call_info->set_call(m->as_Call());
      _call_info->set_hot_cg(_if_hot);
#ifndef PRODUCT
      if (PrintOpto || PrintOptoInlining) {
        tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
        tty->print("WCI: ");
        _call_info->print();
      }
#endif
      _call_info->set_heat(_call_info->compute_heat());
      C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
    }
  }
  return jvms;
}

void WarmCallInfo::make_hot() {
  Unimplemented();
}

void WarmCallInfo::make_cold() {
  // No action:  Just dequeue.
}


//------------------------PredictedCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;

 public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove the extreme values from the range.
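    // Clamping keeps the profile-driven branch probability strictly
    // inside [PROB_MIN, PROB_MAX].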
    if (hit_prob > PROB_MAX)  hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN)  hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
  }

  virtual bool is_virtual()  const { return true; }
  virtual bool is_inline()   const { return _if_hit->is_inline(); }
  virtual bool is_deferred() const { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit, hit_prob);
}


JVMState* PredictedCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_call bci='%d' klass='%d'",
              jvms->bci(), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver_before_call(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  Node* exact_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = kit.type_check_receiver(receiver,
                                           _predicted_receiver, _hit_prob,
                                           &exact_receiver);

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms(), parent_parser);
      if (kit.failing())
        return NULL;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != NULL, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // The instance does not exactly match the desired type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // fall through if the instance exactly matches the desired type
  kit.replace_in_map(receiver, exact_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms(), parent_parser);
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms(), parent_parser);
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Finish the diamond.
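  // Merge the fast (region input 1) and slow (region input 2) paths on a
  // common region, building phis for i/o, memory, and any differing stack
  // slots.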
  kit.C->set_has_split_ifs(true);  // Has chance for split-if optimization
  RegionNode* region = new RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool delayed_forbidden) {
  assert(callee->is_method_handle_intrinsic() ||
         callee->is_compiled_lambda_form(), "for_method_handle_call mismatch");
  bool input_not_const;
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, input_not_const);
  Compile* C = Compile::current();
  if (cg != NULL) {
    if (!delayed_forbidden && AlwaysIncrementalInline) {
      return CallGenerator::for_late_inline(callee, cg);
    } else {
      return cg;
    }
  }
  int bci = jvms->bci();
  ciCallProfile profile = caller->call_profile_at_bci(bci);
  int call_site_count = caller->scale_count(profile.count());

  if (IncrementalInline && call_site_count > 0 &&
      (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
  } else {
    // Out-of-line call.
    return CallGenerator::for_direct_call(callee);
  }
}

CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
        guarantee(!target->is_method_handle_intrinsic(), "should not happen");  // XXX remove
        const int vtable_index = Method::invalid_vtable_index;
        CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS, NULL, true, true);
        assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
        if (cg != NULL && cg->is_inline())
          return cg;
      } else {
        const char* msg = "receiver not constant";
        if (PrintInlining)  C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
        C->log_inline_failure(msg);
      }
    }
    break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      // Get MemberName argument:
      Node* member_name = kit.argument(callee->arg_size() - 1);
      if (member_name->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        // In lambda forms we erase signature types to avoid resolving issues
        // involving class loaders.  When we optimize a method handle invoke
        // to a direct call we must cast the receiver and arguments to their
        // actual types.
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          Node* arg = kit.argument(0);
          const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
          const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
          if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
            Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, sig_type));
            kit.set_argument(0, cast_obj);
          }
        }
        // Cast reference arguments to their types.
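        // For each declared reference parameter, insert a CheckCastPP when
        // the argument's current type is weaker than the signature type.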
        for (int i = 0; i < signature->count(); i++) {
          ciType* t = signature->type_at(i);
          if (t->is_klass()) {
            Node* arg = kit.argument(receiver_skip + i);
            const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
            const Type*       sig_type = TypeOopPtr::make_from_klass(t->as_klass());
            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
              Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, sig_type));
              kit.set_argument(receiver_skip + i, cast_obj);
            }
          }
        }

        // Try to get the most accurate receiver type
        const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
        const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
        int  vtable_index       = Method::invalid_vtable_index;
        bool call_does_dispatch = false;

        ciKlass* speculative_receiver_type = NULL;
        if (is_virtual_or_interface) {
          ciInstanceKlass* klass = target->holder();
          Node*             receiver_node = kit.argument(0);
          const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
          // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
          target = C->optimize_virtual_call(caller, jvms->bci(), klass, target, receiver_type,
                                            is_virtual,
                                            call_does_dispatch, vtable_index);  // out-parameters
          // We lack profiling at this call but type speculation may
          // provide us with a type.
          speculative_receiver_type = receiver_type->speculative_type();
        }
        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, true, PROB_ALWAYS, speculative_receiver_type, true, true);
        assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
        if (cg != NULL && cg->is_inline())
          return cg;
      } else {
        const char* msg = "member_name not constant";
        if (PrintInlining)  C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
        C->log_inline_failure(msg);
      }
    }
    break;

  default:
    fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
    break;
  }
  return NULL;
}


//------------------------PredictedIntrinsicGenerator------------------------------
// Internal class which handles all predicted Intrinsic calls.
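// The intrinsic's generate_predicate() emits a runtime check; when the
// predicate fails at run time, control flows to _cg (the normal call),
// otherwise the intrinsic body runs.  The two paths are merged below.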
class PredictedIntrinsicGenerator : public CallGenerator {
  CallGenerator* _intrinsic;
  CallGenerator* _cg;

 public:
  PredictedIntrinsicGenerator(CallGenerator* intrinsic,
                              CallGenerator* cg)
    : CallGenerator(cg->method())
  {
    _intrinsic = intrinsic;
    _cg        = cg;
  }

  virtual bool is_virtual()   const { return true; }
  virtual bool is_inlined()   const { return true; }
  virtual bool is_intrinsic() const { return true; }

  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
};


CallGenerator* CallGenerator::for_predicted_intrinsic(CallGenerator* intrinsic,
                                                      CallGenerator* cg) {
  return new PredictedIntrinsicGenerator(intrinsic, cg);
}


JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms, Parse* parent_parser) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_intrinsic bci='%d' method='%d'",
              jvms->bci(), log->identify(method()));
  }

  Node* slow_ctl = _intrinsic->generate_predicate(kit.sync_jvms());
  if (kit.failing())
    return NULL;  // might happen because of NodeCountInliningCutoff

  kit.C->print_inlining_update(this);
  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms;
  if (slow_ctl != NULL) {
    PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _cg->generate(kit.sync_jvms(), parent_parser);
      if (kit.failing())
        return NULL;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != NULL, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // Predicate is always false.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Generate intrinsic code:
  JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms(), parent_parser);
  if (new_jvms == NULL) {
    // Intrinsic failed, so use slow code or make a direct call.
    if (slow_map == NULL) {
      CallGenerator* cg = CallGenerator::for_direct_call(method());
      new_jvms = cg->generate(kit.sync_jvms(), parent_parser);
    } else {
      kit.set_jvms(slow_jvms);
      return kit.transfer_exceptions_into_jvms();
    }
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Intrinsic method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Finish the diamond.
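  // Same merge as in PredictedCallGenerator::generate() above: region input 1
  // is the intrinsic (fast) path, input 2 the slow call path.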
  kit.C->set_has_split_ifs(true);  // Has chance for split-if optimization
  RegionNode* region = new RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}

//-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which handles calls that are replaced by an uncommon trap.
class UncommonTrapCallGenerator : public CallGenerator {
  Deoptimization::DeoptReason _reason;
  Deoptimization::DeoptAction _action;

 public:
  UncommonTrapCallGenerator(ciMethod* m,
                            Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action)
    : CallGenerator(m)
  {
    _reason = reason;
    _action = action;
  }

  virtual bool is_virtual() const { ShouldNotReachHere(); return false; }
  virtual bool is_trap() const    { return true; }

  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
};


CallGenerator*
CallGenerator::for_uncommon_trap(ciMethod* m,
                                 Deoptimization::DeoptReason reason,
                                 Deoptimization::DeoptAction action) {
  return new UncommonTrapCallGenerator(m, reason, action);
}


JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
  int nargs = method()->arg_size();
  kit.inc_sp(nargs);
  assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
  if (_reason == Deoptimization::Reason_class_check &&
      _action == Deoptimization::Action_maybe_recompile) {
    // Temp fix for 6529811
    // Don't allow uncommon_trap to override our decision to recompile in the event
    // of a class cast failure for a monomorphic call as it will never let us convert
    // the call to either bi-morphic or megamorphic and can lead to unc-trap loops
    bool keep_exact_action = true;
    kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
  } else {
    kit.uncommon_trap(_reason, _action);
  }
  return kit.transfer_exceptions_into_jvms();
}

// (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)

// (Note:  Merged hook_up_exits into ParseGenerator::generate.)
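
// Rough cost model used by WarmCallInfo::init() below.  As a sketch of the
// arithmetic (illustrative numbers only): a 100-bytecode method is estimated
// at 30.0 + 9.5 * 100 = 980 ideal-graph nodes.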
#define NODES_OVERHEAD_PER_METHOD (30.0)
#define NODES_PER_BYTECODE (9.5)

void WarmCallInfo::init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor) {
  int call_count = profile.count();
  int code_size = call_method->code_size();

  // Expected execution count is based on the historical count:
  _count = call_count < 0 ? 1 : call_site->method()->scale_count(call_count, prof_factor);

  // Expected profit from inlining, in units of simple call-overheads.
  _profit = 1.0;

  // Expected work performed by the call in units of call-overheads.
  // %%% need an empirical curve fit for "work" (time in call)
  float bytecodes_per_call = 3;
  _work = 1.0 + code_size / bytecodes_per_call;

  // Expected size of compilation graph:
  // -XX:+PrintParseStatistics once reported:
  //   Methods seen: 9184  Methods parsed: 9184  Nodes created: 1582391
  //   Histogram of 144298 parsed bytecodes:
  // %%% Need a better predictor for graph size.
  _size = NODES_OVERHEAD_PER_METHOD + (NODES_PER_BYTECODE * code_size);
}

// is_cold:  Return true if the node should never be inlined.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_cold() const {
  if (count()  <  WarmCallMinCount)   return true;
  if (profit() <  WarmCallMinProfit)  return true;
  if (work()   >  WarmCallMaxWork)    return true;
  if (size()   >  WarmCallMaxSize)    return true;
  return false;
}

// is_hot:  Return true if the node should be inlined immediately.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_hot() const {
  assert(!is_cold(), "eliminate is_cold cases before testing is_hot");
  if (count()  >= HotCallCountThreshold)   return true;
  if (profit() >= HotCallProfitThreshold)  return true;
  if (work()   <= HotCallTrivialWork)      return true;
  if (size()   <= HotCallTrivialSize)      return true;
  return false;
}

// compute_heat:
float WarmCallInfo::compute_heat() const {
  assert(!is_cold(), "compute heat only on warm nodes");
  assert(!is_hot(),  "compute heat only on warm nodes");
  int min_size = MAX2(0,   (int)HotCallTrivialSize);
  int max_size = MIN2(500, (int)WarmCallMaxSize);
  float method_size = (size() - min_size) / MAX2(1, max_size - min_size);
  float size_factor;
  if      (method_size < 0.05)  size_factor = 4;    // 2 sigmas better than avg.
  else if (method_size < 0.15)  size_factor = 2;    // 1 sigma better than avg.
  else if (method_size < 0.5)   size_factor = 1;    // better than avg.
  else                          size_factor = 0.5;  // worse than avg.
  return (count() * profit() * size_factor);
}

bool WarmCallInfo::warmer_than(WarmCallInfo* that) {
  assert(this != that, "compare only different WCIs");
  assert(this->heat() != 0 && that->heat() != 0, "call compute_heat 1st");
  if (this->heat() > that->heat())  return true;
  if (this->heat() < that->heat())  return false;
  assert(this->heat() == that->heat(), "no NaN heat allowed");
  // Equal heat.  Break the tie some other way.
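  // Prefer the WCI whose call node has the higher _idx; if either lacks a
  // call node, fall back to comparing object addresses so the ordering
  // stays deterministic within this compile.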
  if (!this->call() || !that->call())  return (address)this > (address)that;
  return this->call()->_idx > that->call()->_idx;
}

//#define UNINIT_NEXT ((WarmCallInfo*)badAddress)
#define UNINIT_NEXT ((WarmCallInfo*)NULL)

WarmCallInfo* WarmCallInfo::insert_into(WarmCallInfo* head) {
  assert(next() == UNINIT_NEXT, "not yet on any list");
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != NULL && next_p->warmer_than(this)) {
    prev_p = next_p;
    next_p = prev_p->next();
  }
  // Install this between prev_p and next_p.
  this->set_next(next_p);
  if (prev_p == NULL)
    head = this;
  else
    prev_p->set_next(this);
  return head;
}

WarmCallInfo* WarmCallInfo::remove_from(WarmCallInfo* head) {
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != this) {
    assert(next_p != NULL, "this must be in the list somewhere");
    prev_p = next_p;
    next_p = prev_p->next();
  }
  next_p = this->next();
  debug_only(this->set_next(UNINIT_NEXT));
  // Remove this from between prev_p and next_p.
  if (prev_p == NULL)
    head = next_p;
  else
    prev_p->set_next(next_p);
  return head;
}

WarmCallInfo WarmCallInfo::_always_hot(WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE(),
                                       WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE());
WarmCallInfo WarmCallInfo::_always_cold(WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE(),
                                        WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE());

WarmCallInfo* WarmCallInfo::always_hot() {
  assert(_always_hot.is_hot(), "must always be hot");
  return &_always_hot;
}

WarmCallInfo* WarmCallInfo::always_cold() {
  assert(_always_cold.is_cold(), "must always be cold");
  return &_always_cold;
}


#ifndef PRODUCT

void WarmCallInfo::print() const {
  tty->print("%s : C=%6.1f P=%6.1f W=%6.1f S=%6.1f H=%6.1f -> %p",
             is_cold() ? "cold" : is_hot() ? "hot " : "warm",
             count(), profit(), work(), size(), compute_heat(), next());
  tty->cr();
  if (call() != NULL)  call()->dump();
}

void print_wci(WarmCallInfo* ci) {
  ci->print();
}

void WarmCallInfo::print_all() const {
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    p->print();
}

int WarmCallInfo::count_all() const {
  int cnt = 0;
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    cnt++;
  return cnt;
}

#endif //PRODUCT