/*
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/sharedRuntime.hpp"

// Utility function: return the C2 call signature (TypeFunc) for this generator's method.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

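// Returns true when the symbolic call site (as recorded in the caller's
// bytecode) resolves to a method handle intrinsic while the actual callee
// is a regular method, i.e. the MH linker/invoker adapter has been inlined
// away and the call targets the underlying method directly.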
bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* callee) {
  ciMethod* symbolic_info = jvms->method()->get_method_at_bci(jvms->bci());
  return symbolic_info->is_method_handle_intrinsic() && !callee->is_method_handle_intrinsic();
}

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
private:
  bool  _is_osr;
  float _expected_uses;

public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
  }

  virtual bool      is_parse() const           { return true; }
  virtual JVMState* generate(JVMState* jvms);
  int is_osr() { return _is_osr; }

};

JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  // Grab signature for matching/allocation
#ifdef ASSERT
  if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
    MutexLocker ml(Compile_lock);
    assert(C->env()->system_dictionary_modification_counter_changed(),
           "Must invalidate if TypeFuncs differ");
  }
#endif

  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
 private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool                _separate_io_proj;

 public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _call_node(NULL),
      _separate_io_proj(separate_io_proj)
  {
  }
  virtual JVMState* generate(JVMState* jvms);

  CallStaticJavaNode* call_node() const { return _call_node; }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode *call = new CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
    // additional information about the method being invoked should be attached
    // to the call site to make resolution logic work
    // (see SharedRuntime::resolve_static_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_intrinsic() ||
        method()->is_compiled_lambda_form()) {
      call->set_method_handle_invoke(true);
    }
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
private:
  int _vtable_index;
public:
  VirtualCallGenerator(ciMethod* method, int vtable_index)
    : CallGenerator(method), _vtable_index(vtable_index)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const          { return true; }
  virtual JVMState* generate(JVMState* jvms);
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);

  kit.C->print_inlining_update(this);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it.  The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
    ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
    int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
    kit.inc_sp(arg_size);  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod *caller = kit.method();
  ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
       ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
       >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode *call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call (optimized virtual or virtual)
    // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
    // about the method being invoked should be attached to the call site to
    // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
    call->set_override_symbolic_info(true);
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call.  Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller.  Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index);
}

// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
 private:
  // unique id for log compilation
  jlong _unique_id;

 protected:
  CallGenerator* _inline_cg;
  virtual bool do_late_inline_check(JVMState* jvms) { return true; }

 public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    DirectCallGenerator(method, true), _unique_id(0), _inline_cg(inline_cg) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    // Record that this call site should be revisited once the main
    // parse is finished.
    if (!is_mh_late_inline()) {
      C->add_late_inline(this);
    }

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

  virtual void print_inlining_late(const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_assert_ready();
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
    C->print_inlining_move_to(this);
    C->print_inlining_update_delayed(this);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }
};

void LateInlineCallGenerator::do_late_inline() {
  // Can't inline it
  CallStaticJavaNode* call = call_node();
  if (call == NULL || call->outcnt() == 0 ||
      call->in(0) == NULL || call->in(0)->is_top()) {
    return;
  }

  const TypeTuple *r = call->tf()->domain();
  for (int i1 = 0; i1 < method()->arg_size(); i1++) {
    if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }

  // check for unreachable loop
  CallProjections callprojs;
  call->extract_projections(&callprojs, true);
  if (callprojs.fallthrough_catchproj == call->in(0) ||
      callprojs.catchall_catchproj == call->in(0) ||
      callprojs.fallthrough_memproj == call->in(TypeFunc::Memory) ||
      callprojs.catchall_memproj == call->in(TypeFunc::Memory) ||
      callprojs.fallthrough_ioproj == call->in(TypeFunc::I_O) ||
      callprojs.catchall_ioproj == call->in(TypeFunc::I_O) ||
      (callprojs.resproj != NULL && call->find_edge(callprojs.resproj) != -1) ||
      (callprojs.exobj != NULL && call->find_edge(callprojs.exobj) != -1)) {
    return;
  }

  Compile* C = Compile::current();
  // Remove inlined methods from Compiler's lists.
  if (call->is_macro()) {
    C->remove_macro_node(call);
  }

  // Make a clone of the JVMState that is appropriate for driving a parse
  JVMState* old_jvms = call->jvms();
  JVMState* jvms = old_jvms->clone_shallow(C);
  uint size = call->req();
  SafePointNode* map = new SafePointNode(size, jvms);
  for (uint i1 = 0; i1 < size; i1++) {
    map->init_req(i1, call->in(i1));
  }

  // Make sure the state is a MergeMem for parsing.
  if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
    Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
    C->initial_gvn()->set_type_bottom(mem);
    map->set_req(TypeFunc::Memory, mem);
  }

  uint nargs = method()->arg_size();
  // blow away old call arguments
  Node* top = C->top();
  for (uint i1 = 0; i1 < nargs; i1++) {
    map->set_req(TypeFunc::Parms + i1, top);
  }
  jvms->set_map(map);

  // Make enough space in the expression stack to transfer
  // the incoming arguments and return value.
  map->ensure_stack(jvms, jvms->method()->max_stack());
  for (uint i1 = 0; i1 < nargs; i1++) {
    map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
  }

  C->print_inlining_assert_ready();

  C->print_inlining_move_to(this);

  C->log_late_inline(this);

  // This check is done here because for_method_handle_inline()
  // needs the JVMState of the inlined call.
  if (!do_late_inline_check(jvms)) {
    map->disconnect_inputs(NULL, C);
    return;
  }

  // Setup default node notes to be picked up by the inlining
  Node_Notes* old_nn = C->node_notes_at(call->_idx);
  if (old_nn != NULL) {
    Node_Notes* entry_nn = old_nn->clone(C);
    entry_nn->set_jvms(jvms);
    C->set_default_node_notes(entry_nn);
  }

  // Now perform the inlining using the synthesized JVMState
  JVMState* new_jvms = _inline_cg->generate(jvms);
  if (new_jvms == NULL)  return;  // no change
  if (C->failing())      return;

  // Capture any exceptional control flow
  GraphKit kit(new_jvms);

  // Find the result object
  Node* result = C->top();
  int   result_size = method()->return_type()->size();
  if (result_size != 0 && !kit.stopped()) {
    result = (result_size == 1) ? kit.pop() : kit.pop_pair();
  }

  C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
  C->env()->notice_inlined_method(_inline_cg->method());
  C->set_inlining_progress(true);

  kit.replace_call(call, result, true);
}


CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}

class LateInlineMHCallGenerator : public LateInlineCallGenerator {
  ciMethod* _caller;
  int _attempt;
  bool _input_not_const;

  virtual bool do_late_inline_check(JVMState* jvms);
  virtual bool already_attempted() const { return _attempt > 0; }

 public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}

  virtual bool is_mh_late_inline() const { return true; }

  virtual JVMState* generate(JVMState* jvms) {
    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);

    Compile* C = Compile::current();
    if (_input_not_const) {
      // Inlining won't be possible, so no need to enqueue right now.
      call_node()->set_generator(this);
    } else {
      C->add_late_inline(this);
    }
    return new_jvms;
  }
};

bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {

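  // Re-attempt to build an inline generator for this MH call site; the
  // MethodHandle/MemberName argument may have become a constant since the
  // call was first seen.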
  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);

  Compile::current()->print_inlining_update_delayed(this);

  if (!_input_not_const) {
    _attempt++;
  }

  if (cg != NULL && cg->is_inline()) {
    assert(!cg->is_late_inline(), "we're doing late inlining");
    _inline_cg = cg;
    Compile::current()->dec_number_of_mh_late_inlines();
    return true;
  }

  call_node()->set_generator(this);
  return false;
}

CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  Compile::current()->inc_number_of_mh_late_inlines();
  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
  return cg;
}

class LateInlineStringCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_string_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_string_late_inline() const { return true; }
};

CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineStringCallGenerator(method, inline_cg);
}

class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_boxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }
};

CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineBoxingCallGenerator(method, inline_cg);
}

//---------------------------WarmCallGenerator--------------------------------
// Internal class which handles initial deferral of inlining decisions.
class WarmCallGenerator : public CallGenerator {
  WarmCallInfo*   _call_info;
  CallGenerator*  _if_cold;
  CallGenerator*  _if_hot;
  bool            _is_virtual;   // caches virtuality of if_cold
  bool            _is_inline;    // caches inline-ness of if_hot

public:
  WarmCallGenerator(WarmCallInfo* ci,
                    CallGenerator* if_cold,
                    CallGenerator* if_hot)
    : CallGenerator(if_cold->method())
  {
    assert(method() == if_hot->method(), "consistent choices");
    _call_info  = ci;
    _if_cold    = if_cold;
    _if_hot     = if_hot;
    _is_virtual = if_cold->is_virtual();
    _is_inline  = if_hot->is_inline();
  }

  virtual bool      is_inline() const           { return _is_inline; }
  virtual bool      is_virtual() const          { return _is_virtual; }
  virtual bool      is_deferred() const         { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
                                            CallGenerator* if_cold,
                                            CallGenerator* if_hot) {
  return new WarmCallGenerator(ci, if_cold, if_hot);
}

JVMState* WarmCallGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (C->log() != NULL) {
    C->log()->elem("warm_call bci='%d'", jvms->bci());
  }
  jvms = _if_cold->generate(jvms);
  if (jvms != NULL) {
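    // Walk up from the returned control projection to the CallJava node just
    // generated (CatchProj -> Catch -> Proj -> CallJava); if any step is
    // missing, m degenerates to top and the call is not queued.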
    Node* m = jvms->map()->control();
    if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
    if (m->is_Catch())     m = m->in(0);  else m = C->top();
    if (m->is_Proj())      m = m->in(0);  else m = C->top();
    if (m->is_CallJava()) {
      _call_info->set_call(m->as_Call());
      _call_info->set_hot_cg(_if_hot);
#ifndef PRODUCT
      if (PrintOpto || PrintOptoInlining) {
        tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
        tty->print("WCI: ");
        _call_info->print();
      }
#endif
      _call_info->set_heat(_call_info->compute_heat());
      C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
    }
  }
  return jvms;
}

void WarmCallInfo::make_hot() {
  Unimplemented();
}

void WarmCallInfo::make_cold() {
  // No action:  Just dequeue.
}

//------------------------PredictedCallGenerator------------------------------
// Internal class which handles out-of-line calls guarded by a receiver-type
// prediction: the hot path is used when the receiver matches the predicted
// klass, the miss path otherwise.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;

public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove such extreme values from the range.
    if (hit_prob > PROB_MAX)   hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN)   hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
  }

  virtual bool      is_virtual()   const    { return true; }
  virtual bool      is_inline()    const    { return _if_hit->is_inline(); }
  virtual bool      is_deferred()  const    { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit, hit_prob);
}


JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);
  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_call bci='%d' klass='%d'",
              jvms->bci(), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver_before_call(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  // Make a copy of the replaced nodes in case we need to restore them
  ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
  replaced_nodes.clone();

  Node* exact_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = kit.type_check_receiver(receiver,
                                           _predicted_receiver, _hit_prob,
                                           &exact_receiver);

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms = NULL;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      if (kit.failing())
        return NULL;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != NULL, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // The instance does not exactly match the desired type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // fall through if the instance exactly matches the desired type
  kit.replace_in_map(receiver, exact_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // There are 2 branches and the replaced nodes are only valid on
  // one: restore the replaced nodes to what they were before the
  // branch.
  kit.map()->set_replaced_nodes(replaced_nodes);

  // Finish the diamond.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  // Merge memory
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff().
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit) break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool delayed_forbidden) {
  assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
  bool input_not_const;
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, input_not_const);
  Compile* C = Compile::current();
  if (cg != NULL) {
    if (!delayed_forbidden && AlwaysIncrementalInline) {
      return CallGenerator::for_late_inline(callee, cg);
    } else {
      return cg;
    }
  }
  int bci = jvms->bci();
  ciCallProfile profile = caller->call_profile_at_bci(bci);
  int call_site_count = caller->scale_count(profile.count());

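  // No inline generator could be built yet. With IncrementalInline enabled,
  // queue a late-inline attempt for call sites that have actually executed
  // rather than emitting an out-of-line call; sites whose MH argument is
  // already constant are retried only outside an incremental inlining round
  // or once the inlining node budget has been exceeded.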
  if (IncrementalInline && call_site_count > 0 &&
      (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
  } else {
    // Out-of-line call.
    return CallGenerator::for_direct_call(callee);
  }
}

CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
        const int vtable_index = Method::invalid_vtable_index;

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "signatures mismatch");
          return NULL;
        }

        CallGenerator* cg = C->call_generator(target, vtable_index,
                                              false /* call_does_dispatch */,
                                              jvms,
                                              true /* allow_inline */,
                                              PROB_ALWAYS);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "receiver not constant");
      }
    }
    break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      // Get MemberName argument:
      Node* member_name = kit.argument(callee->arg_size() - 1);
      if (member_name->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "signatures mismatch");
          return NULL;
        }

        // In lambda forms we erase signature types to avoid resolving issues
        // involving class loaders.  When we optimize a method handle invoke
        // to a direct call we must cast the receiver and arguments to their
        // actual types.
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          Node* arg = kit.argument(0);
          const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
          const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
          if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
            Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, sig_type));
            kit.set_argument(0, cast_obj);
          }
        }
        // Cast reference arguments to their types.
        for (int i = 0, j = 0; i < signature->count(); i++) {
          ciType* t = signature->type_at(i);
          if (t->is_klass()) {
            Node* arg = kit.argument(receiver_skip + j);
            const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
            const Type*       sig_type = TypeOopPtr::make_from_klass(t->as_klass());
            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
              Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, sig_type));
              kit.set_argument(receiver_skip + j, cast_obj);
            }
          }
          j += t->size();  // long and double take two slots
        }

        // Try to get the most accurate receiver type
        const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
        const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
        int  vtable_index       = Method::invalid_vtable_index;
        bool call_does_dispatch = false;

        ciKlass* speculative_receiver_type = NULL;
        if (is_virtual_or_interface) {
          ciInstanceKlass* klass = target->holder();
          Node*             receiver_node = kit.argument(0);
          const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
          // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
          // optimize_virtual_call() takes 2 different holder
          // arguments for a corner case that doesn't apply here (see
          // Parse::do_call())
          target = C->optimize_virtual_call(caller, jvms->bci(), klass, klass,
                                            target, receiver_type, is_virtual,
                                            call_does_dispatch, vtable_index, // out-parameters
                                            false /* check_access */);
          // We lack profiling at this call but type speculation may
          // provide us with a type
          speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
        }
        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
                                              !StressMethodHandleLinkerInlining /* allow_inline */,
                                              PROB_ALWAYS,
                                              speculative_receiver_type);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "member_name not constant");
      }
    }
    break;

  default:
    fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
    break;
  }
  return NULL;
}


//------------------------PredicatedIntrinsicGenerator------------------------------
// Internal class which handles all predicated Intrinsic calls.
class PredicatedIntrinsicGenerator : public CallGenerator {
  CallGenerator* _intrinsic;
  CallGenerator* _cg;

public:
  PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
                               CallGenerator* cg)
    : CallGenerator(cg->method())
  {
    _intrinsic = intrinsic;
    _cg        = cg;
  }

  virtual bool      is_virtual()   const    { return true; }
  virtual bool      is_inlined()   const    { return true; }
  virtual bool      is_intrinsic() const    { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
                                                       CallGenerator* cg) {
  return new PredicatedIntrinsicGenerator(intrinsic, cg);
}


JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
  // The code we want to generate here is:
  //    if (receiver == NULL)
  //        uncommon_trap
  //    if (predicate(0))
  //        do_intrinsic(0)
  //    else
  //    if (predicate(1))
  //        do_intrinsic(1)
  //    ...
  //    else
  //        do_java_comp

  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicated_intrinsic bci='%d' method='%d'",
              jvms->bci(), log->identify(method()));
  }

  if (!method()->is_static()) {
    // We need an explicit receiver null_check before checking its type in predicate.
    // We share a map with the caller, so his JVMS gets adjusted.
    Node* receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      return kit.transfer_exceptions_into_jvms();
    }
  }

  int n_predicates = _intrinsic->predicates_count();
  assert(n_predicates > 0, "sanity");

  JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));

  // Region for normal compilation code if intrinsic failed.
  Node* slow_region = new RegionNode(1);

  int results = 0;
  for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
#ifdef ASSERT
    JVMState* old_jvms = kit.jvms();
    SafePointNode* old_map = kit.map();
    Node* old_io  = old_map->i_o();
    Node* old_mem = old_map->memory();
    Node* old_exc = old_map->next_exception();
#endif
    Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
#ifdef ASSERT
    // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
    assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
    SafePointNode* new_map = kit.map();
    assert(old_io  == new_map->i_o(), "generate_predicate should not change i_o");
    assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
    assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
#endif
    if (!kit.stopped()) {
      PreserveJVMState pjvms(&kit);
      // Generate intrinsic code:
      JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
      if (new_jvms == NULL) {
        // Intrinsic failed, use normal compilation path for this predicate.
        slow_region->add_req(kit.control());
      } else {
        kit.add_exception_states_from(new_jvms);
        kit.set_jvms(new_jvms);
        if (!kit.stopped()) {
          result_jvms[results++] = kit.jvms();
        }
      }
    }
    if (else_ctrl == NULL) {
      else_ctrl = kit.C->top();
    }
    kit.set_control(else_ctrl);
  }
  if (!kit.stopped()) {
    // Final 'else' after predicates.
    slow_region->add_req(kit.control());
  }
  if (slow_region->req() > 1) {
    PreserveJVMState pjvms(&kit);
    // Generate normal compilation code:
    kit.set_control(gvn.transform(slow_region));
    JVMState* new_jvms = _cg->generate(kit.sync_jvms());
    if (kit.failing())
      return NULL;  // might happen because of NodeCountInliningCutoff
    assert(new_jvms != NULL, "must be");
    kit.add_exception_states_from(new_jvms);
    kit.set_jvms(new_jvms);
    if (!kit.stopped()) {
      result_jvms[results++] = kit.jvms();
    }
  }

  if (results == 0) {
    // All paths ended in uncommon traps.
    (void) kit.stop();
    return kit.transfer_exceptions_into_jvms();
  }

  if (results == 1) { // Only one path
    kit.set_jvms(result_jvms[0]);
    return kit.transfer_exceptions_into_jvms();
  }

  // Merge all paths.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(results + 1);
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  for (int i = 0; i < results; i++) {
    JVMState* jvms = result_jvms[i];
    int path = i + 1;
    SafePointNode* map = jvms->map();
    region->init_req(path, map->control());
    iophi->set_req(path, map->i_o());
    if (i == 0) {
      kit.set_jvms(jvms);
    } else {
      kit.merge_memory(map->merged_memory(), region, path);
    }
  }
  kit.set_control(gvn.transform(region));
  kit.set_i_o(gvn.transform(iophi));
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }

  // Merge debug info.
  Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  Node* map = kit.map();
  uint limit = map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff().
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit) break;
    }
    Node* n = map->in(i);
    ins[0] = n;
    const Type* t = gvn.type(n);
    bool needs_phi = false;
    for (int j = 1; j < results; j++) {
      JVMState* jvms = result_jvms[j];
      Node* jmap = jvms->map();
      Node* m = NULL;
      if (jmap->req() > i) {
        m = jmap->in(i);
        if (m != n) {
          needs_phi = true;
          t = t->meet_speculative(gvn.type(m));
        }
      }
      ins[j] = m;
    }
    if (needs_phi) {
      Node* phi = PhiNode::make(region, n, t);
      for (int j = 1; j < results; j++) {
        phi->set_req(j + 1, ins[j]);
      }
      map->set_req(i, gvn.transform(phi));
    }
  }

  return kit.transfer_exceptions_into_jvms();
}

//-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which replaces the call site with an unconditional uncommon trap.
class UncommonTrapCallGenerator : public CallGenerator {
  Deoptimization::DeoptReason _reason;
  Deoptimization::DeoptAction _action;

public:
  UncommonTrapCallGenerator(ciMethod* m,
                            Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action)
    : CallGenerator(m)
  {
    _reason = reason;
    _action = action;
  }

  virtual bool      is_virtual() const          { ShouldNotReachHere(); return false; }
  virtual bool      is_trap() const             { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator*
CallGenerator::for_uncommon_trap(ciMethod* m,
                                 Deoptimization::DeoptReason reason,
                                 Deoptimization::DeoptAction action) {
  return new UncommonTrapCallGenerator(m, reason, action);
}


JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
  // Callsite signature can be different from actual method being called (i.e. _linkTo* sites).
  // Use callsite signature always.
  ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
  int nargs = declared_method->arg_size();
  kit.inc_sp(nargs);
  assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
  if (_reason == Deoptimization::Reason_class_check &&
      _action == Deoptimization::Action_maybe_recompile) {
    // Temp fix for 6529811
    // Don't allow uncommon_trap to override our decision to recompile in the event
    // of a class cast failure for a monomorphic call, as it will never let us convert
    // the call to either bi-morphic or megamorphic and can lead to unc-trap loops.
    bool keep_exact_action = true;
    kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
  } else {
    kit.uncommon_trap(_reason, _action);
  }
  return kit.transfer_exceptions_into_jvms();
}

// (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)

// (Note:  Merged hook_up_exits into ParseGenerator::generate.)

#define NODES_OVERHEAD_PER_METHOD (30.0)
#define NODES_PER_BYTECODE (9.5)

void WarmCallInfo::init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor) {
  int call_count = profile.count();
  int code_size = call_method->code_size();

  // Expected execution count is based on the historical count:
  _count = call_count < 0 ? 1 : call_site->method()->scale_count(call_count, prof_factor);

  // Expected profit from inlining, in units of simple call-overheads.
  _profit = 1.0;

  // Expected work performed by the call in units of call-overheads.
  // %%% need an empirical curve fit for "work" (time in call)
  float bytecodes_per_call = 3;
  _work = 1.0 + code_size / bytecodes_per_call;

  // Expected size of compilation graph:
  // -XX:+PrintParseStatistics once reported:
  //  Methods seen: 9184  Methods parsed: 9184  Nodes created: 1582391
  //  Histogram of 144298 parsed bytecodes:
  // %%% Need a better predictor for graph size.
  _size = NODES_OVERHEAD_PER_METHOD + (NODES_PER_BYTECODE * code_size);
}
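
// Worked example using the constants above: a 30-bytecode method gives
// _work = 1.0 + 30/3 = 11 call-overheads and _size = 30.0 + 9.5*30 = 315
// projected graph nodes; _count is simply the profiled call count scaled
// by the caller's own invocation profile.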

// is_cold:  Return true if the node should never be inlined.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_cold() const {
  if (count()  <  WarmCallMinCount)        return true;
  if (profit() <  WarmCallMinProfit)       return true;
  if (work()   >  WarmCallMaxWork)         return true;
  if (size()   >  WarmCallMaxSize)         return true;
  return false;
}

// is_hot:  Return true if the node should be inlined immediately.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_hot() const {
  assert(!is_cold(), "eliminate is_cold cases before testing is_hot");
  if (count()  >= HotCallCountThreshold)   return true;
  if (profit() >= HotCallProfitThreshold)  return true;
  if (work()   <= HotCallTrivialWork)      return true;
  if (size()   <= HotCallTrivialSize)      return true;
  return false;
}

// compute_heat:  Combine the metrics into a single score used to rank
// warm calls for inlining.
float WarmCallInfo::compute_heat() const {
  assert(!is_cold(), "compute heat only on warm nodes");
  assert(!is_hot(),  "compute heat only on warm nodes");
  int min_size = MAX2(0,   (int)HotCallTrivialSize);
  int max_size = MIN2(500, (int)WarmCallMaxSize);
  float method_size = (size() - min_size) / MAX2(1, max_size - min_size);
  float size_factor;
  if      (method_size < 0.05)  size_factor = 4;   // 2 sigmas better than avg.
  else if (method_size < 0.15)  size_factor = 2;   // 1 sigma better than avg.
  else if (method_size < 0.5)   size_factor = 1;   // better than avg.
  else                          size_factor = 0.5; // worse than avg.
  return (count() * profit() * size_factor);
}
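
// Worked example (illustrative bounds; the real ones come from the
// HotCallTrivialSize and WarmCallMaxSize flags): with min_size = 10 and
// max_size = 500, a warm call of size() = 100 has
// method_size = (100 - 10) / 490 ~= 0.18, so size_factor = 1 and
// heat = count() * profit().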

bool WarmCallInfo::warmer_than(WarmCallInfo* that) {
  assert(this != that, "compare only different WCIs");
  assert(this->heat() != 0 && that->heat() != 0, "call compute_heat 1st");
  if (this->heat() > that->heat())   return true;
  if (this->heat() < that->heat())   return false;
  assert(this->heat() == that->heat(), "no NaN heat allowed");
  // Equal heat.  Break the tie some other way.
  if (!this->call() || !that->call())  return (address)this > (address)that;
  return this->call()->_idx > that->call()->_idx;
}

//#define UNINIT_NEXT ((WarmCallInfo*)badAddress)
#define UNINIT_NEXT ((WarmCallInfo*)NULL)

WarmCallInfo* WarmCallInfo::insert_into(WarmCallInfo* head) {
  assert(next() == UNINIT_NEXT, "not yet on any list");
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != NULL && next_p->warmer_than(this)) {
    prev_p = next_p;
    next_p = prev_p->next();
  }
  // Install this between prev_p and next_p.
  this->set_next(next_p);
  if (prev_p == NULL)
    head = this;
  else
    prev_p->set_next(this);
  return head;
}
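
// Example: inserting a WCI of heat 5 into the list [9, 3] scans past the
// heat-9 entry (still warmer) and stops before the heat-3 entry, giving
// [9, 5, 3]; the list is kept sorted hottest-first.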

WarmCallInfo* WarmCallInfo::remove_from(WarmCallInfo* head) {
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != this) {
    assert(next_p != NULL, "this must be in the list somewhere");
    prev_p = next_p;
    next_p = prev_p->next();
  }
  next_p = this->next();
  debug_only(this->set_next(UNINIT_NEXT));
  // Remove this from between prev_p and next_p.
  if (prev_p == NULL)
    head = next_p;
  else
    prev_p->set_next(next_p);
  return head;
}

WarmCallInfo WarmCallInfo::_always_hot(WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE(),
                                       WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE());
WarmCallInfo WarmCallInfo::_always_cold(WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE(),
                                        WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE());

WarmCallInfo* WarmCallInfo::always_hot() {
  assert(_always_hot.is_hot(), "must always be hot");
  return &_always_hot;
}

WarmCallInfo* WarmCallInfo::always_cold() {
  assert(_always_cold.is_cold(), "must always be cold");
  return &_always_cold;
}


#ifndef PRODUCT

void WarmCallInfo::print() const {
  tty->print("%s : C=%6.1f P=%6.1f W=%6.1f S=%6.1f H=%6.1f -> %p",
             is_cold() ? "cold" : is_hot() ? "hot " : "warm",
             count(), profit(), work(), size(), compute_heat(), next());
  tty->cr();
  if (call() != NULL)  call()->dump();
}

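// Free-function wrapper, handy for invoking print() from a debugger.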
void print_wci(WarmCallInfo* ci) {
  ci->print();
}

void WarmCallInfo::print_all() const {
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    p->print();
}

int WarmCallInfo::count_all() const {
  int cnt = 0;
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    cnt++;
  return cnt;
}

#endif //PRODUCT