/*
 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/sharedRuntime.hpp"

// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

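// A call site counts as an inlined method handle intrinsic when the
// symbolic reference at the bci resolves to a MethodHandle intrinsic
// (linkTo*/invokeBasic) but the method actually being invoked is not one,
// i.e. the adapter has been bypassed and the target is called directly.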
bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
  return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
  ciMethod* symbolic_info = caller->get_method_at_bci(bci);
  return is_inlined_method_handle_intrinsic(symbolic_info, m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* symbolic_info, ciMethod* m) {
  return symbolic_info->is_method_handle_intrinsic() && !m->is_method_handle_intrinsic();
}

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
private:
  bool  _is_osr;
  float _expected_uses;

public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
  }

  virtual bool      is_parse() const           { return true; }
  virtual JVMState* generate(JVMState* jvms);
  int is_osr() { return _is_osr; }

};

JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL;  // bailing out of the compile; do not try to parse
  }

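  // Note: constructing the Parse object below performs the actual bytecode
  // parsing and graph construction as a side effect; the parser then holds
  // the merged exit state.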
  Parse parser(jvms, method(), _expected_uses);
  // Grab signature for matching/allocation
#ifdef ASSERT
  if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
    assert(C->env()->system_dictionary_modification_counter_changed(),
           "Must invalidate if TypeFuncs differ");
  }
#endif

  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
 private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool                _separate_io_proj;

 public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _separate_io_proj(separate_io_proj)
  {
  }
  virtual JVMState* generate(JVMState* jvms);

  CallStaticJavaNode* call_node() const { return _call_node; }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode *call = new CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
    // additional information about the method being invoked should be attached
    // to the call site to make resolution logic work
    // (see SharedRuntime::resolve_static_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_intrinsic() ||
        method()->is_compiled_lambda_form()) {
      call->set_method_handle_invoke(true);
    }
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
private:
  int _vtable_index;
public:
  VirtualCallGenerator(ciMethod* method, int vtable_index)
    : CallGenerator(method), _vtable_index(vtable_index)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const          { return true; }
  virtual JVMState* generate(JVMState* jvms);
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);

  kit.C->print_inlining_update(this);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it.  The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
    ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
    int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
    kit.inc_sp(arg_size);  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod *caller = kit.method();
  ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
       ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
       >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode *call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call (optimized virtual or virtual)
    // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
    // about the method being invoked should be attached to the call site to
    // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
    call->set_override_symbolic_info(true);
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call.  Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller.  Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index);
}

// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
 private:
  // unique id for log compilation
  jlong _unique_id;

 protected:
  CallGenerator* _inline_cg;
  virtual bool do_late_inline_check(JVMState* jvms) { return true; }

 public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    DirectCallGenerator(method, true), _unique_id(0), _inline_cg(inline_cg) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    // Record that this call site should be revisited once the main
    // parse is finished.
    if (!is_mh_late_inline()) {
      C->add_late_inline(this);
    }

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

  virtual void print_inlining_late(const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_assert_ready();
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
    C->print_inlining_move_to(this);
    C->print_inlining_update_delayed(this);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }
};

void LateInlineCallGenerator::do_late_inline() {
  // Can't inline it if the call node has gone dead or been eliminated.
  CallStaticJavaNode* call = call_node();
  if (call == NULL || call->outcnt() == 0 ||
      call->in(0) == NULL || call->in(0)->is_top()) {
    return;
  }

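  // A top argument or a top memory edge means the call sits on a path that
  // has already gone dead; per the asserts below, that can only happen
  // during incremental inlining (never during the initial parse), so just
  // give up on this site.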
  const TypeTuple *r = call->tf()->domain();
  for (int i1 = 0; i1 < method()->arg_size(); i1++) {
    if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }

  // check for unreachable loop
  CallProjections callprojs;
  call->extract_projections(&callprojs, true);
  if (callprojs.fallthrough_catchproj == call->in(0) ||
      callprojs.catchall_catchproj == call->in(0) ||
      callprojs.fallthrough_memproj == call->in(TypeFunc::Memory) ||
      callprojs.catchall_memproj == call->in(TypeFunc::Memory) ||
      callprojs.fallthrough_ioproj == call->in(TypeFunc::I_O) ||
      callprojs.catchall_ioproj == call->in(TypeFunc::I_O) ||
      (callprojs.resproj != NULL && call->find_edge(callprojs.resproj) != -1) ||
      (callprojs.exobj != NULL && call->find_edge(callprojs.exobj) != -1)) {
    return;
  }

  Compile* C = Compile::current();
  // Remove inlined methods from Compiler's lists.
  if (call->is_macro()) {
    C->remove_macro_node(call);
  }

  // Make a clone of the JVMState that is appropriate for driving a parse
  JVMState* old_jvms = call->jvms();
  JVMState* jvms = old_jvms->clone_shallow(C);
  uint size = call->req();
  SafePointNode* map = new SafePointNode(size, jvms);
  for (uint i1 = 0; i1 < size; i1++) {
    map->init_req(i1, call->in(i1));
  }

  // Make sure the state is a MergeMem for parsing.
  if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
    Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
    C->initial_gvn()->set_type_bottom(mem);
    map->set_req(TypeFunc::Memory, mem);
  }

  uint nargs = method()->arg_size();
  // blow away old call arguments
  Node* top = C->top();
  for (uint i1 = 0; i1 < nargs; i1++) {
    map->set_req(TypeFunc::Parms + i1, top);
  }
  jvms->set_map(map);

  // Make enough space in the expression stack to transfer
  // the incoming arguments and return value.
  map->ensure_stack(jvms, jvms->method()->max_stack());
  for (uint i1 = 0; i1 < nargs; i1++) {
    map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
  }

  C->print_inlining_assert_ready();

  C->print_inlining_move_to(this);

  C->log_late_inline(this);

  // This check is done here because for_method_handle_inline() needs
  // the jvms of the inlined state.
  if (!do_late_inline_check(jvms)) {
    map->disconnect_inputs(NULL, C);
    return;
  }

  // Setup default node notes to be picked up by the inlining
  Node_Notes* old_nn = C->node_notes_at(call->_idx);
  if (old_nn != NULL) {
    Node_Notes* entry_nn = old_nn->clone(C);
    entry_nn->set_jvms(jvms);
    C->set_default_node_notes(entry_nn);
  }

  // Now perform the inlining using the synthesized JVMState
  JVMState* new_jvms = _inline_cg->generate(jvms);
  if (new_jvms == NULL)  return;  // no change
  if (C->failing())      return;

  // Capture any exceptional control flow
  GraphKit kit(new_jvms);

  // Find the result object
  Node* result = C->top();
  int   result_size = method()->return_type()->size();
  if (result_size != 0 && !kit.stopped()) {
    result = (result_size == 1) ? kit.pop() : kit.pop_pair();
  }

  C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
  C->env()->notice_inlined_method(_inline_cg->method());
  C->set_inlining_progress(true);
  C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup
  kit.replace_call(call, result, true);
}


CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}

class LateInlineMHCallGenerator : public LateInlineCallGenerator {
  ciMethod* _caller;
  int _attempt;
  bool _input_not_const;

  virtual bool do_late_inline_check(JVMState* jvms);
  virtual bool already_attempted() const { return _attempt > 0; }

 public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}

  virtual bool is_mh_late_inline() const { return true; }

  virtual JVMState* generate(JVMState* jvms) {
    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);

    Compile* C = Compile::current();
    if (_input_not_const) {
      // Inlining won't be possible, so there is no need to enqueue right now.
      call_node()->set_generator(this);
    } else {
      C->add_late_inline(this);
    }
    return new_jvms;
  }
};

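// Try to resolve the method handle call now: if the relevant argument has
// become a constant, for_method_handle_inline() can produce an inline
// CallGenerator to substitute for the direct call; otherwise the call node
// keeps this generator attached so the attempt can be made again later.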
bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {

  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);

  Compile::current()->print_inlining_update_delayed(this);

  if (!_input_not_const) {
    _attempt++;
  }

  if (cg != NULL && cg->is_inline()) {
    assert(!cg->is_late_inline(), "we're doing late inlining");
    _inline_cg = cg;
    Compile::current()->dec_number_of_mh_late_inlines();
    return true;
  }

  call_node()->set_generator(this);
  return false;
}

CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  Compile::current()->inc_number_of_mh_late_inlines();
  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
  return cg;
}

class LateInlineStringCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_string_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_string_late_inline() const { return true; }
};

CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineStringCallGenerator(method, inline_cg);
}

class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_boxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }
};

CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineBoxingCallGenerator(method, inline_cg);
}

//---------------------------WarmCallGenerator--------------------------------
// Internal class which handles initial deferral of inlining decisions.
class WarmCallGenerator : public CallGenerator {
  WarmCallInfo*   _call_info;
  CallGenerator*  _if_cold;
  CallGenerator*  _if_hot;
  bool            _is_virtual;   // caches virtuality of if_cold
  bool            _is_inline;    // caches inline-ness of if_hot

public:
  WarmCallGenerator(WarmCallInfo* ci,
                    CallGenerator* if_cold,
                    CallGenerator* if_hot)
    : CallGenerator(if_cold->method())
  {
    assert(method() == if_hot->method(), "consistent choices");
    _call_info  = ci;
    _if_cold    = if_cold;
    _if_hot     = if_hot;
    _is_virtual = if_cold->is_virtual();
    _is_inline  = if_hot->is_inline();
  }

  virtual bool      is_inline() const           { return _is_inline; }
  virtual bool      is_virtual() const          { return _is_virtual; }
  virtual bool      is_deferred() const         { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
                                            CallGenerator* if_cold,
                                            CallGenerator* if_hot) {
  return new WarmCallGenerator(ci, if_cold, if_hot);
}

JVMState* WarmCallGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (C->log() != NULL) {
    C->log()->elem("warm_call bci='%d'", jvms->bci());
  }
  jvms = _if_cold->generate(jvms);
  if (jvms != NULL) {
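    // Walk up from the map's control through CatchProj -> Catch -> Proj
    // to reach the CallJava node just emitted by the cold-path generator.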
    Node* m = jvms->map()->control();
    if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
    if (m->is_Catch())     m = m->in(0);  else m = C->top();
    if (m->is_Proj())      m = m->in(0);  else m = C->top();
    if (m->is_CallJava()) {
      _call_info->set_call(m->as_Call());
      _call_info->set_hot_cg(_if_hot);
#ifndef PRODUCT
      if (PrintOpto || PrintOptoInlining) {
        tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
        tty->print("WCI: ");
        _call_info->print();
      }
#endif
      _call_info->set_heat(_call_info->compute_heat());
      C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
    }
  }
  return jvms;
}

void WarmCallInfo::make_hot() {
  Unimplemented();
}

void WarmCallInfo::make_cold() {
  // No action:  Just dequeue.
}


//------------------------PredictedCallGenerator------------------------------
// Internal class which handles calls guarded by a predicted receiver type.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;

public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove the extreme values from the range.
    if (hit_prob > PROB_MAX)   hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN)   hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
  }

  virtual bool      is_virtual()   const    { return true; }
  virtual bool      is_inline()    const    { return _if_hit->is_inline(); }
  virtual bool      is_deferred()  const    { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit, hit_prob);
}


JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);
  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_call bci='%d' klass='%d'",
              jvms->bci(), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver_before_call(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  // Make a copy of the replaced nodes in case we need to restore them
  ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
  replaced_nodes.clone();

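  // Emit the receiver type check: on the hit path 'exact_receiver' is
  // sharpened to the predicted klass, while 'slow_ctl' is the control
  // taken when the dynamic type does not match.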
  Node* exact_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = kit.type_check_receiver(receiver,
                                           _predicted_receiver, _hit_prob,
                                           &exact_receiver);

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms = NULL;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      if (kit.failing())
        return NULL;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != NULL, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // The instance can never match the desired type; only the slow path remains.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // fall through if the instance exactly matches the desired type
  kit.replace_in_map(receiver, exact_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // There are 2 branches and the replaced nodes are only valid on
  // one: restore the replaced nodes to what they were before the
  // branch.
  kit.map()->set_replaced_nodes(replaced_nodes);

  // Finish the diamond.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  // Merge memory
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }
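  // Merge debug info: wherever the fast and slow maps disagree on a stack
  // slot or local, make a phi of the two values.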
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit) break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool delayed_forbidden) {
  assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
  bool input_not_const;
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, input_not_const);
  Compile* C = Compile::current();
  if (cg != NULL) {
    if (!delayed_forbidden && AlwaysIncrementalInline) {
      return CallGenerator::for_late_inline(callee, cg);
    } else {
      return cg;
    }
  }
  int bci = jvms->bci();
  ciCallProfile profile = caller->call_profile_at_bci(bci);
  int call_site_count = caller->scale_count(profile.count());

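  // Immediate inlining did not succeed (e.g. the MethodHandle argument is
  // not yet a constant).  If the site has actually been reached and
  // incremental inlining applies, queue a late inline attempt; otherwise
  // fall back to a plain out-of-line call.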
  if (IncrementalInline && call_site_count > 0 &&
      (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
  } else {
    // Out-of-line call.
    return CallGenerator::for_direct_call(callee);
  }
}

CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
        const int vtable_index = Method::invalid_vtable_index;

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "signatures mismatch");
          return NULL;
        }

        CallGenerator* cg = C->call_generator(target, vtable_index,
                                              false /* call_does_dispatch */,
                                              jvms,
                                              true /* allow_inline */,
                                              PROB_ALWAYS);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "receiver not constant");
      }
    }
    break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      // Get MemberName argument:
      Node* member_name = kit.argument(callee->arg_size() - 1);
      if (member_name->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "signatures mismatch");
          return NULL;
        }

        // In lambda forms we erase signature types to avoid resolving issues
        // involving class loaders.  When we optimize a method handle invoke
        // to a direct call we must cast the receiver and arguments to their
        // actual types.
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          Node* arg = kit.argument(0);
          const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
          const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
          if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
            const Type* recv_type = arg_type->join_speculative(sig_type); // keep speculative part
            Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, recv_type));
            kit.set_argument(0, cast_obj);
          }
        }
        // Cast reference arguments to their types.
        for (int i = 0, j = 0; i < signature->count(); i++) {
          ciType* t = signature->type_at(i);
          if (t->is_klass()) {
            Node* arg = kit.argument(receiver_skip + j);
            const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
            const Type*       sig_type = TypeOopPtr::make_from_klass(t->as_klass());
            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
              const Type* narrowed_arg_type = arg_type->join_speculative(sig_type); // keep speculative part
              Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, narrowed_arg_type));
              kit.set_argument(receiver_skip + j, cast_obj);
            }
          }
          j += t->size();  // long and double take two slots
        }

        // Try to get the most accurate receiver type
        const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
        const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
        int  vtable_index       = Method::invalid_vtable_index;
        bool call_does_dispatch = false;

        ciKlass* speculative_receiver_type = NULL;
        if (is_virtual_or_interface) {
          ciInstanceKlass* klass = target->holder();
          Node*             receiver_node = kit.argument(0);
          const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
          // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
          // optimize_virtual_call() takes 2 different holder
          // arguments for a corner case that doesn't apply here (see
          // Parse::do_call())
          target = C->optimize_virtual_call(caller, jvms->bci(), klass, klass,
                                            target, receiver_type, is_virtual,
                                            call_does_dispatch, vtable_index, // out-parameters
                                            false /* check_access */);
          // We lack profiling at this call but type speculation may
          // provide us with a type
          speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
        }
        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
                                              !StressMethodHandleLinkerInlining /* allow_inline */,
                                              PROB_ALWAYS,
                                              speculative_receiver_type);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "member_name not constant");
      }
    }
    break;

  default:
    fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
    break;
  }
  return NULL;
}


//------------------------PredicatedIntrinsicGenerator------------------------------
// Internal class which handles all predicated Intrinsic calls.
class PredicatedIntrinsicGenerator : public CallGenerator {
  CallGenerator* _intrinsic;
  CallGenerator* _cg;

public:
  PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
                               CallGenerator* cg)
    : CallGenerator(cg->method())
  {
    _intrinsic = intrinsic;
    _cg        = cg;
  }

  virtual bool      is_virtual()   const    { return true; }
  virtual bool      is_inlined()   const    { return true; }
  virtual bool      is_intrinsic() const    { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
                                                       CallGenerator* cg) {
  return new PredicatedIntrinsicGenerator(intrinsic, cg);
}


JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
  // The code we want to generate here is:
  //    if (receiver == NULL)
  //        uncommon_trap
  //    if (predicate(0))
  //        do_intrinsic(0)
  //    else
  //    if (predicate(1))
  //        do_intrinsic(1)
  //    ...
  //    else
  //        do_java_comp

  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicated_intrinsic bci='%d' method='%d'",
              jvms->bci(), log->identify(method()));
  }

  if (!method()->is_static()) {
    // We need an explicit receiver null_check before checking its type in predicate.
    // We share a map with the caller, so his JVMS gets adjusted.
    Node* receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      return kit.transfer_exceptions_into_jvms();
    }
  }

  int n_predicates = _intrinsic->predicates_count();
  assert(n_predicates > 0, "sanity");

  JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));

  // Region for normal compilation code if intrinsic failed.
  Node* slow_region = new RegionNode(1);

  int results = 0;
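  // Emit the predicates one by one: each iteration generates the intrinsic
  // on the path where its predicate holds, then continues along the 'else'
  // path into the next predicate or, after the last one, the slow region.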
  for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
#ifdef ASSERT
    JVMState* old_jvms = kit.jvms();
    SafePointNode* old_map = kit.map();
    Node* old_io  = old_map->i_o();
    Node* old_mem = old_map->memory();
    Node* old_exc = old_map->next_exception();
#endif
    Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
#ifdef ASSERT
    // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
    assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
    SafePointNode* new_map = kit.map();
    assert(old_io  == new_map->i_o(), "generate_predicate should not change i_o");
    assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
    assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
#endif
    if (!kit.stopped()) {
      PreserveJVMState pjvms(&kit);
      // Generate intrinsic code:
      JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
      if (new_jvms == NULL) {
        // Intrinsic failed, use normal compilation path for this predicate.
        slow_region->add_req(kit.control());
      } else {
        kit.add_exception_states_from(new_jvms);
        kit.set_jvms(new_jvms);
        if (!kit.stopped()) {
          result_jvms[results++] = kit.jvms();
        }
      }
    }
    if (else_ctrl == NULL) {
      else_ctrl = kit.C->top();
    }
    kit.set_control(else_ctrl);
  }
  if (!kit.stopped()) {
    // Final 'else' after predicates.
    slow_region->add_req(kit.control());
  }
  if (slow_region->req() > 1) {
    PreserveJVMState pjvms(&kit);
    // Generate normal compilation code:
    kit.set_control(gvn.transform(slow_region));
    JVMState* new_jvms = _cg->generate(kit.sync_jvms());
    if (kit.failing())
      return NULL;  // might happen because of NodeCountInliningCutoff
    assert(new_jvms != NULL, "must be");
    kit.add_exception_states_from(new_jvms);
    kit.set_jvms(new_jvms);
    if (!kit.stopped()) {
      result_jvms[results++] = kit.jvms();
    }
  }

  if (results == 0) {
    // All paths ended in uncommon traps.
    (void) kit.stop();
    return kit.transfer_exceptions_into_jvms();
  }

  if (results == 1) { // Only one path
    kit.set_jvms(result_jvms[0]);
    return kit.transfer_exceptions_into_jvms();
  }

  // Merge all paths.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(results + 1);
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  for (int i = 0; i < results; i++) {
    JVMState* jvms = result_jvms[i];
    int path = i + 1;
    SafePointNode* map = jvms->map();
    region->init_req(path, map->control());
    iophi->set_req(path, map->i_o());
    if (i == 0) {
      kit.set_jvms(jvms);
    } else {
      kit.merge_memory(map->merged_memory(), region, path);
    }
  }
  kit.set_control(gvn.transform(region));
  kit.set_i_o(gvn.transform(iophi));
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }

  // Merge debug info.
  Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  Node* map = kit.map();
  uint limit = map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit) break;
    }
    Node* n = map->in(i);
    ins[0] = n;
    const Type* t = gvn.type(n);
    bool needs_phi = false;
    for (int j = 1; j < results; j++) {
      JVMState* jvms = result_jvms[j];
      Node* jmap = jvms->map();
      Node* m = NULL;
      if (jmap->req() > i) {
        m = jmap->in(i);
        if (m != n) {
          needs_phi = true;
          t = t->meet_speculative(gvn.type(m));
        }
      }
      ins[j] = m;
    }
    if (needs_phi) {
      Node* phi = PhiNode::make(region, n, t);
      for (int j = 1; j < results; j++) {
        phi->set_req(j + 1, ins[j]);
      }
      map->set_req(i, gvn.transform(phi));
    }
  }

  return kit.transfer_exceptions_into_jvms();
}

//-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which replaces the call site with an uncommon trap.
class UncommonTrapCallGenerator : public CallGenerator {
  Deoptimization::DeoptReason _reason;
  Deoptimization::DeoptAction _action;

public:
  UncommonTrapCallGenerator(ciMethod* m,
                            Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action)
    : CallGenerator(m)
  {
    _reason = reason;
    _action = action;
  }

  virtual bool      is_virtual() const          { ShouldNotReachHere(); return false; }
  virtual bool      is_trap() const             { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator*
CallGenerator::for_uncommon_trap(ciMethod* m,
                                 Deoptimization::DeoptReason reason,
                                 Deoptimization::DeoptAction action) {
  return new UncommonTrapCallGenerator(m, reason, action);
}


JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
  // The callsite signature can differ from that of the actual method being called (i.e. _linkTo* sites).
  // Use callsite signature always.
  ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
  int nargs = declared_method->arg_size();
  kit.inc_sp(nargs);
  assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
  if (_reason == Deoptimization::Reason_class_check &&
      _action == Deoptimization::Action_maybe_recompile) {
    // Temp fix for 6529811
    // Don't allow uncommon_trap to override our decision to recompile in the event
    // of a class cast failure for a monomorphic call as it will never let us convert
    // the call to either bi-morphic or megamorphic and can lead to unc-trap loops
    bool keep_exact_action = true;
    kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
  } else {
    kit.uncommon_trap(_reason, _action);
  }
  return kit.transfer_exceptions_into_jvms();
}

// (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)

// (Note:  Merged hook_up_exits into ParseGenerator::generate.)

#define NODES_OVERHEAD_PER_METHOD (30.0)
#define NODES_PER_BYTECODE (9.5)

void WarmCallInfo::init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor) {
  int call_count = profile.count();
  int code_size = call_method->code_size();

  // Expected execution count is based on the historical count:
  _count = call_count < 0 ? 1 : call_site->method()->scale_count(call_count, prof_factor);

  // Expected profit from inlining, in units of simple call-overheads.
  _profit = 1.0;

  // Expected work performed by the call in units of call-overheads.
  // %%% need an empirical curve fit for "work" (time in call)
  float bytecodes_per_call = 3;
  _work = 1.0 + code_size / bytecodes_per_call;

  // Expected size of compilation graph:
  // -XX:+PrintParseStatistics once reported:
  //  Methods seen: 9184  Methods parsed: 9184  Nodes created: 1582391
  //  Histogram of 144298 parsed bytecodes:
  // %%% Need a better predictor for graph size.
  _size = NODES_OVERHEAD_PER_METHOD + (NODES_PER_BYTECODE * code_size);
}

// is_cold:  Return true if the node should never be inlined.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_cold() const {
  if (count()  <  WarmCallMinCount)        return true;
  if (profit() <  WarmCallMinProfit)       return true;
  if (work()   >  WarmCallMaxWork)         return true;
  if (size()   >  WarmCallMaxSize)         return true;
  return false;
}

// is_hot:  Return true if the node should be inlined immediately.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_hot() const {
  assert(!is_cold(), "eliminate is_cold cases before testing is_hot");
  if (count()  >= HotCallCountThreshold)   return true;
  if (profit() >= HotCallProfitThreshold)  return true;
  if (work()   <= HotCallTrivialWork)      return true;
  if (size()   <= HotCallTrivialSize)      return true;
  return false;
}

// compute_heat:
float WarmCallInfo::compute_heat() const {
  assert(!is_cold(), "compute heat only on warm nodes");
  assert(!is_hot(),  "compute heat only on warm nodes");
  int min_size = MAX2(0,   (int)HotCallTrivialSize);
  int max_size = MIN2(500, (int)WarmCallMaxSize);
  float method_size = (size() - min_size) / MAX2(1, max_size - min_size);
  float size_factor;
  if      (method_size < 0.05)  size_factor = 4;   // 2 sigmas better than avg.
  else if (method_size < 0.15)  size_factor = 2;   // 1 sigma better than avg.
  else if (method_size < 0.5)   size_factor = 1;   // better than avg.
  else                          size_factor = 0.5; // worse than avg.
  return (count() * profit() * size_factor);
}

bool WarmCallInfo::warmer_than(WarmCallInfo* that) {
  assert(this != that, "compare only different WCIs");
  assert(this->heat() != 0 && that->heat() != 0, "call compute_heat 1st");
  if (this->heat() > that->heat())   return true;
  if (this->heat() < that->heat())   return false;
  assert(this->heat() == that->heat(), "no NaN heat allowed");
  // Equal heat.  Break the tie some other way.
  if (!this->call() || !that->call())  return (address)this > (address)that;
  return this->call()->_idx > that->call()->_idx;
}

//#define UNINIT_NEXT ((WarmCallInfo*)badAddress)
#define UNINIT_NEXT ((WarmCallInfo*)NULL)

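// The warm call list is kept sorted by decreasing heat: insert_into() walks
// past every entry warmer than this one and links in just before the first
// entry that is not.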
WarmCallInfo* WarmCallInfo::insert_into(WarmCallInfo* head) {
  assert(next() == UNINIT_NEXT, "not yet on any list");
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != NULL && next_p->warmer_than(this)) {
    prev_p = next_p;
    next_p = prev_p->next();
  }
  // Install this between prev_p and next_p.
  this->set_next(next_p);
  if (prev_p == NULL)
    head = this;
  else
    prev_p->set_next(this);
  return head;
}

WarmCallInfo* WarmCallInfo::remove_from(WarmCallInfo* head) {
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != this) {
    assert(next_p != NULL, "this must be in the list somewhere");
    prev_p = next_p;
    next_p = prev_p->next();
  }
  next_p = this->next();
  debug_only(this->set_next(UNINIT_NEXT));
  // Remove this from between prev_p and next_p.
  if (prev_p == NULL)
    head = next_p;
  else
    prev_p->set_next(next_p);
  return head;
}

WarmCallInfo WarmCallInfo::_always_hot(WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE(),
                                       WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE());
WarmCallInfo WarmCallInfo::_always_cold(WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE(),
                                        WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE());

WarmCallInfo* WarmCallInfo::always_hot() {
  assert(_always_hot.is_hot(), "must always be hot");
  return &_always_hot;
}

WarmCallInfo* WarmCallInfo::always_cold() {
  assert(_always_cold.is_cold(), "must always be cold");
  return &_always_cold;
}


#ifndef PRODUCT

void WarmCallInfo::print() const {
  tty->print("%s : C=%6.1f P=%6.1f W=%6.1f S=%6.1f H=%6.1f -> %p",
             is_cold() ? "cold" : is_hot() ? "hot " : "warm",
             count(), profit(), work(), size(), compute_heat(), next());
  tty->cr();
  if (call() != NULL)  call()->dump();
}

void print_wci(WarmCallInfo* ci) {
  ci->print();
}

void WarmCallInfo::print_all() const {
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    p->print();
}

int WarmCallInfo::count_all() const {
  int cnt = 0;
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    cnt++;
  return cnt;
}

#endif //PRODUCT