/*
 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/sharedRuntime.hpp"

// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
  return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
  ciMethod* symbolic_info = caller->get_method_at_bci(bci);
  return is_inlined_method_handle_intrinsic(symbolic_info, m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* symbolic_info, ciMethod* m) {
  return symbolic_info->is_method_handle_intrinsic() && !m->is_method_handle_intrinsic();
}
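
// Illustrative example (hypothetical names, for orientation only): at an
// invokehandle site the symbolic reference may resolve to a MethodHandle
// linker intrinsic while the method actually being called is the concrete
// target behind the adapter:
//
//   symbolic_info: MethodHandle::linkToStatic   (is_method_handle_intrinsic)
//   m:             com/foo/Bar::baz             (ordinary bytecoded method)
//
// Only that combination answers true; an intrinsic calling an intrinsic, or
// an ordinary symbolic reference, does not.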

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
private:
  bool  _is_osr;
  float _expected_uses;

public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
  }

  virtual bool      is_parse() const           { return true; }
  virtual JVMState* generate(JVMState* jvms);
  int is_osr() { return _is_osr; }

};

JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  // Grab signature for matching/allocation
#ifdef ASSERT
  if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
    assert(C->env()->system_dictionary_modification_counter_changed(),
           "Must invalidate if TypeFuncs differ");
  }
#endif

  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
 private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool                _separate_io_proj;

 public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _call_node(NULL), // not available until generate() has run
      _separate_io_proj(separate_io_proj)
  {
  }
  virtual JVMState* generate(JVMState* jvms);

  CallStaticJavaNode* call_node() const { return _call_node; }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode *call = new CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call and skip a call to the MH.linkTo*/invokeBasic
    // adapter, additional information about the method being invoked must be
    // attached to the call site so that the resolution logic works
    // (see SharedRuntime::resolve_static_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_intrinsic() ||
        method()->is_compiled_lambda_form()) {
      call->set_method_handle_invoke(true);
    }
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}
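
// A rough sketch of the subgraph emitted above (projection layout follows the
// usual C2 conventions; the exact shape depends on set_edges_for_java_call
// and set_results_for_java_call):
//
//                  CallStaticJavaNode (method, bci)
//                 /        |         |            \
//            Proj       Proj       Proj          Proj
//         (Control)    (I_O)     (Memory)   (Parms+0 = result)
//
// When _separate_io_proj is set, the exceptional path receives its own memory
// and I/O projections, so late inlining can rewire fall-through and catch-all
// uses independently.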

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
private:
  int _vtable_index;
public:
  VirtualCallGenerator(ciMethod* method, int vtable_index)
    : CallGenerator(method), _vtable_index(vtable_index)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const          { return true; }
  virtual JVMState* generate(JVMState* jvms);
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);

  kit.C->print_inlining_update(this);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it.  The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
    ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
    int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
    kit.inc_sp(arg_size);  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However, currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod *caller = kit.method();
  ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
       ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
       >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode *call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call (optimized virtual or virtual)
    // and skip a call to the MH.linkTo*/invokeBasic adapter, additional information
    // about the method being invoked must be attached to the call site so that
    // the resolution logic works (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
    call->set_override_symbolic_info(true);
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call.  Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller.  Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index);
}

// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
 private:
  // unique id for log compilation
  jlong _unique_id;

 protected:
  CallGenerator* _inline_cg;
  virtual bool do_late_inline_check(JVMState* jvms) { return true; }

 public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    DirectCallGenerator(method, true), _unique_id(0), _inline_cg(inline_cg) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    // Record that this call site should be revisited once the main
    // parse is finished.
    if (!is_mh_late_inline()) {
      C->add_late_inline(this);
    }

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

  virtual void print_inlining_late(const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_assert_ready();
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
    C->print_inlining_move_to(this);
    C->print_inlining_update_delayed(this);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }
};
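
// Rough lifecycle of a late-inlined call site, sketched for orientation:
//
//   1. generate(jvms)       emits the CallStaticJava placeholder above and,
//                           unless this is the MH variant, registers this
//                           generator on the compile's late-inline list.
//   2. The main parse ends; Compile revisits the list during incremental
//                           inlining.
//   3. do_late_inline()     rebuilds a SafePointNode/JVMState from the call's
//                           inputs, replays _inline_cg->generate() on it, and
//                           replace_call() splices the inlined subgraph in
//                           place of the placeholder.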

void LateInlineCallGenerator::do_late_inline() {
  // Can't inline it if the call node is dead or malformed.
  CallStaticJavaNode* call = call_node();
  if (call == NULL || call->outcnt() == 0 ||
      call->in(0) == NULL || call->in(0)->is_top()) {
    return;
  }

  const TypeTuple *r = call->tf()->domain();
  for (int i1 = 0; i1 < method()->arg_size(); i1++) {
    if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }

  // check for unreachable loop
  CallProjections callprojs;
  call->extract_projections(&callprojs, true);
  if (callprojs.fallthrough_catchproj == call->in(0) ||
      callprojs.catchall_catchproj == call->in(0) ||
      callprojs.fallthrough_memproj == call->in(TypeFunc::Memory) ||
      callprojs.catchall_memproj == call->in(TypeFunc::Memory) ||
      callprojs.fallthrough_ioproj == call->in(TypeFunc::I_O) ||
      callprojs.catchall_ioproj == call->in(TypeFunc::I_O) ||
      (callprojs.resproj != NULL && call->find_edge(callprojs.resproj) != -1) ||
      (callprojs.exobj != NULL && call->find_edge(callprojs.exobj) != -1)) {
    return;
  }

  Compile* C = Compile::current();
  // Remove inlined methods from the compile's lists.
  if (call->is_macro()) {
    C->remove_macro_node(call);
  }

  // Make a clone of the JVMState that is appropriate for driving a parse
  JVMState* old_jvms = call->jvms();
  JVMState* jvms = old_jvms->clone_shallow(C);
  uint size = call->req();
  SafePointNode* map = new SafePointNode(size, jvms);
  for (uint i1 = 0; i1 < size; i1++) {
    map->init_req(i1, call->in(i1));
  }

  // Make sure the state is a MergeMem for parsing.
  if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
    Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
    C->initial_gvn()->set_type_bottom(mem);
    map->set_req(TypeFunc::Memory, mem);
  }

  uint nargs = method()->arg_size();
  // blow away old call arguments
  Node* top = C->top();
  for (uint i1 = 0; i1 < nargs; i1++) {
    map->set_req(TypeFunc::Parms + i1, top);
  }
  jvms->set_map(map);

  // Make enough space in the expression stack to transfer
  // the incoming arguments and return value.
  map->ensure_stack(jvms, jvms->method()->max_stack());
  for (uint i1 = 0; i1 < nargs; i1++) {
    map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
  }

  C->print_inlining_assert_ready();

  C->print_inlining_move_to(this);

  C->log_late_inline(this);

  // This check is done here because for_method_handle_inline() needs
  // the jvms of the inlined state.
  if (!do_late_inline_check(jvms)) {
    map->disconnect_inputs(NULL, C);
    return;
  }

  // Setup default node notes to be picked up by the inlining
  Node_Notes* old_nn = C->node_notes_at(call->_idx);
  if (old_nn != NULL) {
    Node_Notes* entry_nn = old_nn->clone(C);
    entry_nn->set_jvms(jvms);
    C->set_default_node_notes(entry_nn);
  }

  // Now perform the inlining using the synthesized JVMState
  JVMState* new_jvms = _inline_cg->generate(jvms);
  if (new_jvms == NULL)  return;  // no change
  if (C->failing())      return;

  // Capture any exceptional control flow
  GraphKit kit(new_jvms);

  // Find the result object
  Node* result = C->top();
  int   result_size = method()->return_type()->size();
  if (result_size != 0 && !kit.stopped()) {
    result = (result_size == 1) ? kit.pop() : kit.pop_pair();
  }

  C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
  C->env()->notice_inlined_method(_inline_cg->method());
  C->set_inlining_progress(true);
  C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup
  kit.replace_call(call, result, true);
}


CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}

class LateInlineMHCallGenerator : public LateInlineCallGenerator {
  ciMethod* _caller;
  int _attempt;
  bool _input_not_const;

  virtual bool do_late_inline_check(JVMState* jvms);
  virtual bool already_attempted() const { return _attempt > 0; }

 public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}

  virtual bool is_mh_late_inline() const { return true; }

  virtual JVMState* generate(JVMState* jvms) {
    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);

    Compile* C = Compile::current();
    if (_input_not_const) {
      // Inlining won't be possible, so there is no need to enqueue right now.
      call_node()->set_generator(this);
    } else {
      C->add_late_inline(this);
    }
    return new_jvms;
  }
};

bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {

  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);

  Compile::current()->print_inlining_update_delayed(this);

  if (!_input_not_const) {
    _attempt++;
  }

  if (cg != NULL && cg->is_inline()) {
    assert(!cg->is_late_inline(), "we're doing late inlining");
    _inline_cg = cg;
    Compile::current()->dec_number_of_mh_late_inlines();
    return true;
  }

  call_node()->set_generator(this);
  return false;
}

CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  Compile::current()->inc_number_of_mh_late_inlines();
  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
  return cg;
}

class LateInlineStringCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_string_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_string_late_inline() const { return true; }
};

CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineStringCallGenerator(method, inline_cg);
}

class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_boxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }
};

CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineBoxingCallGenerator(method, inline_cg);
}

//---------------------------WarmCallGenerator--------------------------------
// Internal class which handles initial deferral of inlining decisions.
class WarmCallGenerator : public CallGenerator {
  WarmCallInfo*   _call_info;
  CallGenerator*  _if_cold;
  CallGenerator*  _if_hot;
  bool            _is_virtual;   // caches virtuality of if_cold
  bool            _is_inline;    // caches inline-ness of if_hot

public:
  WarmCallGenerator(WarmCallInfo* ci,
                    CallGenerator* if_cold,
                    CallGenerator* if_hot)
    : CallGenerator(if_cold->method())
  {
    assert(method() == if_hot->method(), "consistent choices");
    _call_info  = ci;
    _if_cold    = if_cold;
    _if_hot     = if_hot;
    _is_virtual = if_cold->is_virtual();
    _is_inline  = if_hot->is_inline();
  }

  virtual bool      is_inline() const           { return _is_inline; }
  virtual bool      is_virtual() const          { return _is_virtual; }
  virtual bool      is_deferred() const         { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
                                            CallGenerator* if_cold,
                                            CallGenerator* if_hot) {
  return new WarmCallGenerator(ci, if_cold, if_hot);
}

JVMState* WarmCallGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (C->log() != NULL) {
    C->log()->elem("warm_call bci='%d'", jvms->bci());
  }
  jvms = _if_cold->generate(jvms);
  if (jvms != NULL) {
    Node* m = jvms->map()->control();
    if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
    if (m->is_Catch())     m = m->in(0);  else m = C->top();
    if (m->is_Proj())      m = m->in(0);  else m = C->top();
    if (m->is_CallJava()) {
      _call_info->set_call(m->as_Call());
      _call_info->set_hot_cg(_if_hot);
#ifndef PRODUCT
      if (PrintOpto || PrintOptoInlining) {
        tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
        tty->print("WCI: ");
        _call_info->print();
      }
#endif
      _call_info->set_heat(_call_info->compute_heat());
      C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
    }
  }
  return jvms;
}

void WarmCallInfo::make_hot() {
  Unimplemented();
}

void WarmCallInfo::make_cold() {
  // No action:  Just dequeue.
}


//------------------------PredictedCallGenerator------------------------------
// Internal class which handles out-of-line calls guarded by a receiver type check.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;
  bool           _exact_check;

public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, bool exact_check,
                         float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove the extreme values from the range.
    if (hit_prob > PROB_MAX)   hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN)   hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
    _exact_check        = exact_check;
  }

  virtual bool      is_virtual()   const    { return true; }
  virtual bool      is_inline()    const    { return _if_hit->is_inline(); }
  virtual bool      is_deferred()  const    { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit,
                                    /*exact_check=*/true, hit_prob);
}

CallGenerator* CallGenerator::for_guarded_call(ciKlass* guarded_receiver,
                                               CallGenerator* if_missed,
                                               CallGenerator* if_hit) {
  return new PredictedCallGenerator(guarded_receiver, if_missed, if_hit,
                                    /*exact_check=*/false, PROB_ALWAYS);
}
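
// Control-flow shape emitted by PredictedCallGenerator::generate() below,
// sketched for orientation:
//
//   receiver null check
//         |
//   type_check_receiver (exact) / subtype_check_receiver (guarded)
//       /       \
//   hit path   slow_ctl
//   (_if_hit)  (_if_missed, typically the virtual call)
//       \       /
//   RegionNode(3) with i/o, memory and debug-info Phis merging both paths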

JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);
  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_call bci='%d' exact='%d' klass='%d'",
              jvms->bci(), (_exact_check ? 1 : 0), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver_before_call(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  // Make a copy of the replaced nodes in case we need to restore them
  ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
  replaced_nodes.clone();

  Node* casted_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = NULL;
  if (_exact_check) {
    slow_ctl = kit.type_check_receiver(receiver, _predicted_receiver, _hit_prob,
                                       &casted_receiver);
  } else {
    slow_ctl = kit.subtype_check_receiver(receiver, _predicted_receiver,
                                          &casted_receiver);
  }

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms = NULL;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      if (kit.failing())
        return NULL;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != NULL, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // The instance never matches the desired type, so take the slow path.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // fall through if the instance exactly matches the desired type
  kit.replace_in_map(receiver, casted_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // There are 2 branches and the replaced nodes are only valid on
  // one: restore the replaced nodes to what they were before the
  // branch.
  kit.map()->set_replaced_nodes(replaced_nodes);

  // Finish the diamond.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  // Merge memory
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit) break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool delayed_forbidden) {
  assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
  bool input_not_const;
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, input_not_const);
  Compile* C = Compile::current();
  if (cg != NULL) {
    if (!delayed_forbidden && AlwaysIncrementalInline) {
      return CallGenerator::for_late_inline(callee, cg);
    } else {
      return cg;
    }
  }
  int bci = jvms->bci();
  ciCallProfile profile = caller->call_profile_at_bci(bci);
  int call_site_count = caller->scale_count(profile.count());

  if (IncrementalInline && call_site_count > 0 &&
      (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
  } else {
    // Out-of-line call.
    return CallGenerator::for_direct_call(callee);
  }
}
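
// Decision summary for the code above (paraphrase, not normative): if the
// adapter can be resolved to a constant target now, inline it directly (or
// wrap it in a plain late inline under AlwaysIncrementalInline when delay is
// permitted); otherwise prefer MH late inlining for live sites when
// IncrementalInline is on, and fall back to an out-of-line direct call.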

CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
        const int vtable_index = Method::invalid_vtable_index;

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "signatures mismatch");
          return NULL;
        }

        CallGenerator* cg = C->call_generator(target, vtable_index,
                                              false /* call_does_dispatch */,
                                              jvms,
                                              true /* allow_inline */,
                                              PROB_ALWAYS);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "receiver not constant");
      }
    }
    break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      // Get MemberName argument:
      Node* member_name = kit.argument(callee->arg_size() - 1);
      if (member_name->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "signatures mismatch");
          return NULL;
        }

        // In lambda forms we erase signature types to avoid resolution issues
        // involving class loaders.  When we optimize a method handle invoke
        // to a direct call we must cast the receiver and arguments to their
        // actual types.
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          Node* arg = kit.argument(0);
          const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
          const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
          if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
            const Type* recv_type = arg_type->join_speculative(sig_type); // keep speculative part
            Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, recv_type));
            kit.set_argument(0, cast_obj);
          }
        }
        // Cast reference arguments to their types.
        for (int i = 0, j = 0; i < signature->count(); i++) {
          ciType* t = signature->type_at(i);
          if (t->is_klass()) {
            Node* arg = kit.argument(receiver_skip + j);
            const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
            const Type*       sig_type = TypeOopPtr::make_from_klass(t->as_klass());
            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
              const Type* narrowed_arg_type = arg_type->join_speculative(sig_type); // keep speculative part
              Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, narrowed_arg_type));
              kit.set_argument(receiver_skip + j, cast_obj);
            }
          }
          j += t->size();  // long and double take two slots
        }

        // Try to get the most accurate receiver type
        const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
        const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
        int  vtable_index       = Method::invalid_vtable_index;
        bool call_does_dispatch = false;

        ciKlass* speculative_receiver_type = NULL;
        if (is_virtual_or_interface) {
          ciInstanceKlass* klass = target->holder();
          Node*             receiver_node = kit.argument(0);
          const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
          // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
          // optimize_virtual_call() takes 2 different holder
          // arguments for a corner case that doesn't apply here (see
          // Parse::do_call())
          target = C->optimize_virtual_call(caller, jvms->bci(), klass, klass,
                                            target, receiver_type, is_virtual,
                                            call_does_dispatch, vtable_index, // out-parameters
                                            false /* check_access */);
          // We lack profiling at this call, but type speculation may
          // provide us with a type.
          speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
        }
        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
                                              !StressMethodHandleLinkerInlining /* allow_inline */,
                                              PROB_ALWAYS,
                                              speculative_receiver_type);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "member_name not constant");
      }
    }
    break;

  default:
    fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
    break;
  }
  return NULL;
}


//------------------------PredicatedIntrinsicGenerator------------------------------
// Internal class which handles all predicated Intrinsic calls.
class PredicatedIntrinsicGenerator : public CallGenerator {
  CallGenerator* _intrinsic;
  CallGenerator* _cg;

public:
  PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
                               CallGenerator* cg)
    : CallGenerator(cg->method())
  {
    _intrinsic = intrinsic;
    _cg        = cg;
  }

  virtual bool      is_virtual()   const    { return true; }
  virtual bool      is_inlined()   const    { return true; }
  virtual bool      is_intrinsic() const    { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
                                                       CallGenerator* cg) {
  return new PredicatedIntrinsicGenerator(intrinsic, cg);
}


JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
  // The code we want to generate here is:
  //    if (receiver == NULL)
  //        uncommon_trap
  //    if (predicate(0))
  //        do_intrinsic(0)
  //    else
  //    if (predicate(1))
  //        do_intrinsic(1)
  //    ...
  //    else
  //        do_java_comp

  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicated_intrinsic bci='%d' method='%d'",
              jvms->bci(), log->identify(method()));
  }

  if (!method()->is_static()) {
    // We need an explicit receiver null_check before checking its type in predicate.
    // We share a map with the caller, so his JVMS gets adjusted.
    Node* receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      return kit.transfer_exceptions_into_jvms();
    }
  }

  int n_predicates = _intrinsic->predicates_count();
  assert(n_predicates > 0, "sanity");

  JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));

  // Region for normal compilation code if intrinsic failed.
  Node* slow_region = new RegionNode(1);

  int results = 0;
  for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
#ifdef ASSERT
    JVMState* old_jvms = kit.jvms();
    SafePointNode* old_map = kit.map();
    Node* old_io  = old_map->i_o();
    Node* old_mem = old_map->memory();
    Node* old_exc = old_map->next_exception();
#endif
    Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
#ifdef ASSERT
    // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
    assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
    SafePointNode* new_map = kit.map();
    assert(old_io  == new_map->i_o(), "generate_predicate should not change i_o");
    assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
    assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
#endif
    if (!kit.stopped()) {
      PreserveJVMState pjvms(&kit);
      // Generate intrinsic code:
      JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
      if (new_jvms == NULL) {
        // Intrinsic failed, use normal compilation path for this predicate.
        slow_region->add_req(kit.control());
      } else {
        kit.add_exception_states_from(new_jvms);
        kit.set_jvms(new_jvms);
        if (!kit.stopped()) {
          result_jvms[results++] = kit.jvms();
        }
      }
    }
    if (else_ctrl == NULL) {
      else_ctrl = kit.C->top();
    }
    kit.set_control(else_ctrl);
  }
  if (!kit.stopped()) {
    // Final 'else' after predicates.
    slow_region->add_req(kit.control());
  }
  if (slow_region->req() > 1) {
    PreserveJVMState pjvms(&kit);
    // Generate normal compilation code:
    kit.set_control(gvn.transform(slow_region));
    JVMState* new_jvms = _cg->generate(kit.sync_jvms());
    if (kit.failing())
      return NULL;  // might happen because of NodeCountInliningCutoff
    assert(new_jvms != NULL, "must be");
    kit.add_exception_states_from(new_jvms);
    kit.set_jvms(new_jvms);
    if (!kit.stopped()) {
      result_jvms[results++] = kit.jvms();
    }
  }

  if (results == 0) {
    // All paths ended in uncommon traps.
    (void) kit.stop();
    return kit.transfer_exceptions_into_jvms();
  }

  if (results == 1) { // Only one path
    kit.set_jvms(result_jvms[0]);
    return kit.transfer_exceptions_into_jvms();
  }

  // Merge all paths.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(results + 1);
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  for (int i = 0; i < results; i++) {
    JVMState* jvms = result_jvms[i];
    int path = i + 1;
    SafePointNode* map = jvms->map();
    region->init_req(path, map->control());
    iophi->set_req(path, map->i_o());
    if (i == 0) {
      kit.set_jvms(jvms);
    } else {
      kit.merge_memory(map->merged_memory(), region, path);
    }
  }
  kit.set_control(gvn.transform(region));
  kit.set_i_o(gvn.transform(iophi));
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }

  // Merge debug info.
  Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  Node* map = kit.map();
  uint limit = map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit) break;
    }
    Node* n = map->in(i);
    ins[0] = n;
    const Type* t = gvn.type(n);
    bool needs_phi = false;
    for (int j = 1; j < results; j++) {
      JVMState* jvms = result_jvms[j];
      Node* jmap = jvms->map();
      Node* m = NULL;
      if (jmap->req() > i) {
        m = jmap->in(i);
        if (m != n) {
          needs_phi = true;
          t = t->meet_speculative(gvn.type(m));
        }
      }
      ins[j] = m;
    }
    if (needs_phi) {
      Node* phi = PhiNode::make(region, n, t);
      for (int j = 1; j < results; j++) {
        phi->set_req(j + 1, ins[j]);
      }
      map->set_req(i, gvn.transform(phi));
    }
  }

  return kit.transfer_exceptions_into_jvms();
}

//-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which generates an uncommon trap in place of a call.
class UncommonTrapCallGenerator : public CallGenerator {
  Deoptimization::DeoptReason _reason;
  Deoptimization::DeoptAction _action;

public:
  UncommonTrapCallGenerator(ciMethod* m,
                            Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action)
    : CallGenerator(m)
  {
    _reason = reason;
    _action = action;
  }

  virtual bool      is_virtual() const          { ShouldNotReachHere(); return false; }
  virtual bool      is_trap() const             { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator*
CallGenerator::for_uncommon_trap(ciMethod* m,
                                 Deoptimization::DeoptReason reason,
                                 Deoptimization::DeoptAction action) {
  return new UncommonTrapCallGenerator(m, reason, action);
}


JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
  // The callsite signature can differ from that of the actual method being called
  // (i.e. at _linkTo* sites).  Always use the callsite signature.
  ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
  int nargs = declared_method->arg_size();
  kit.inc_sp(nargs);
  assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
  if (_reason == Deoptimization::Reason_class_check &&
      _action == Deoptimization::Action_maybe_recompile) {
    // Temp fix for 6529811
    // Don't allow uncommon_trap to override our decision to recompile in the event
    // of a class cast failure for a monomorphic call, as it will never let us convert
    // the call to either bi-morphic or megamorphic and can lead to unc-trap loops.
    bool keep_exact_action = true;
    kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
  } else {
    kit.uncommon_trap(_reason, _action);
  }
  return kit.transfer_exceptions_into_jvms();
}

// (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)

// (Note:  Merged hook_up_exits into ParseGenerator::generate.)

#define NODES_OVERHEAD_PER_METHOD (30.0)
#define NODES_PER_BYTECODE (9.5)

void WarmCallInfo::init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor) {
  int call_count = profile.count();
  int code_size = call_method->code_size();

  // Expected execution count is based on the historical count:
  _count = call_count < 0 ? 1 : call_site->method()->scale_count(call_count, prof_factor);

  // Expected profit from inlining, in units of simple call-overheads.
  _profit = 1.0;

  // Expected work performed by the call in units of call-overheads.
  // %%% need an empirical curve fit for "work" (time in call)
  float bytecodes_per_call = 3;
  _work = 1.0 + code_size / bytecodes_per_call;

  // Expected size of compilation graph:
  // -XX:+PrintParseStatistics once reported:
  //  Methods seen: 9184  Methods parsed: 9184  Nodes created: 1582391
  //  Histogram of 144298 parsed bytecodes:
  // %%% Need a better predictor for graph size.
  _size = NODES_OVERHEAD_PER_METHOD + (NODES_PER_BYTECODE * code_size);
}
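
// Worked example of the estimates above (illustrative numbers only): for a
// callee with code_size = 30 bytecodes and a non-negative profile count,
//
//   _work = 1.0 + 30 / 3.0        = 11.0  call-overheads
//   _size = 30.0 + (9.5 * 30)     = 315.0 expected graph nodes
//
// while _count is the profiled count rescaled by the caller's scale_count().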

// is_cold:  Return true if the node should never be inlined.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_cold() const {
  if (count()  <  WarmCallMinCount)        return true;
  if (profit() <  WarmCallMinProfit)       return true;
  if (work()   >  WarmCallMaxWork)         return true;
  if (size()   >  WarmCallMaxSize)         return true;
  return false;
}

// is_hot:  Return true if the node should be inlined immediately.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_hot() const {
  assert(!is_cold(), "eliminate is_cold cases before testing is_hot");
  if (count()  >= HotCallCountThreshold)   return true;
  if (profit() >= HotCallProfitThreshold)  return true;
  if (work()   <= HotCallTrivialWork)      return true;
  if (size()   <= HotCallTrivialSize)      return true;
  return false;
}

// compute_heat:
float WarmCallInfo::compute_heat() const {
  assert(!is_cold(), "compute heat only on warm nodes");
  assert(!is_hot(),  "compute heat only on warm nodes");
  int min_size = MAX2(0,   (int)HotCallTrivialSize);
  int max_size = MIN2(500, (int)WarmCallMaxSize);
  float method_size = (size() - min_size) / MAX2(1, max_size - min_size);
  float size_factor;
  if      (method_size < 0.05)  size_factor = 4;   // 2 sigmas better than avg.
  else if (method_size < 0.15)  size_factor = 2;   // 1 sigma better than avg.
  else if (method_size < 0.5)   size_factor = 1;   // better than avg.
  else                          size_factor = 0.5; // worse than avg.
  return (count() * profit() * size_factor);
}
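
// Worked example (assumes HotCallTrivialSize = 10 and WarmCallMaxSize >= 500;
// the actual flag defaults may differ): for size() = 40,
//
//   min_size    = 10, max_size = 500
//   method_size = (40 - 10) / (500 - 10) ~= 0.061  ->  size_factor = 2
//   heat        = count() * profit() * 2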

bool WarmCallInfo::warmer_than(WarmCallInfo* that) {
  assert(this != that, "compare only different WCIs");
  assert(this->heat() != 0 && that->heat() != 0, "call compute_heat 1st");
  if (this->heat() > that->heat())   return true;
  if (this->heat() < that->heat())   return false;
  assert(this->heat() == that->heat(), "no NaN heat allowed");
  // Equal heat.  Break the tie some other way.
  if (!this->call() || !that->call())  return (address)this > (address)that;
  return this->call()->_idx > that->call()->_idx;
}

//#define UNINIT_NEXT ((WarmCallInfo*)badAddress)
#define UNINIT_NEXT ((WarmCallInfo*)NULL)

WarmCallInfo* WarmCallInfo::insert_into(WarmCallInfo* head) {
  assert(next() == UNINIT_NEXT, "not yet on any list");
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != NULL && next_p->warmer_than(this)) {
    prev_p = next_p;
    next_p = prev_p->next();
  }
  // Install this between prev_p and next_p.
  this->set_next(next_p);
  if (prev_p == NULL)
    head = this;
  else
    prev_p->set_next(this);
  return head;
}
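
// Usage sketch: the list stays sorted by descending heat.  Inserting nodes
// with heats 5, 9, 7 into an empty list, in that order, yields
//
//   head -> 9 -> 7 -> 5
//
// because warmer_than() advances the scan past every strictly warmer entry.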

WarmCallInfo* WarmCallInfo::remove_from(WarmCallInfo* head) {
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != this) {
    assert(next_p != NULL, "this must be in the list somewhere");
    prev_p = next_p;
    next_p = prev_p->next();
  }
  next_p = this->next();
  debug_only(this->set_next(UNINIT_NEXT));
  // Remove this from between prev_p and next_p.
  if (prev_p == NULL)
    head = next_p;
  else
    prev_p->set_next(next_p);
  return head;
}

WarmCallInfo WarmCallInfo::_always_hot(WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE(),
                                       WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE());
WarmCallInfo WarmCallInfo::_always_cold(WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE(),
                                        WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE());

WarmCallInfo* WarmCallInfo::always_hot() {
  assert(_always_hot.is_hot(), "must always be hot");
  return &_always_hot;
}

WarmCallInfo* WarmCallInfo::always_cold() {
  assert(_always_cold.is_cold(), "must always be cold");
  return &_always_cold;
}


#ifndef PRODUCT

void WarmCallInfo::print() const {
  tty->print("%s : C=%6.1f P=%6.1f W=%6.1f S=%6.1f H=%6.1f -> %p",
             is_cold() ? "cold" : is_hot() ? "hot " : "warm",
             count(), profit(), work(), size(), compute_heat(), next());
  tty->cr();
  if (call() != NULL)  call()->dump();
}

void print_wci(WarmCallInfo* ci) {
  ci->print();
}

void WarmCallInfo::print_all() const {
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    p->print();
}

int WarmCallInfo::count_all() const {
  int cnt = 0;
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    cnt++;
  return cnt;
}

#endif //PRODUCT