src/share/vm/opto/callGenerator.cpp

rev 5411 : 8024069: replace_in_map() should operate on parent maps
Summary: type information gets lost because replace_in_map() doesn't update parent maps
Reviewed-by:
rev 5413 : [mq]: replaceinmapparents-cleanup
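
The essence of the change: during inlining, replace_in_map() records that a node
(say, a receiver whose type a check has just sharpened) should be replaced by a
more precisely typed node, but before this fix only the innermost map was
rewritten, so the improved type was lost in the caller once parsing returned.
Below is a minimal sketch of that parent-chain walk, assuming hypothetical
MapState/slots stand-ins for C2's JVMState/SafePointNode machinery; it
illustrates the idea only and is not the HotSpot implementation.

  #include <map>

  struct Node { int _idx; };        // stand-in for C2's Node (hypothetical)

  struct MapState {
    std::map<int, Node*> slots;     // local/stack slots -> nodes (hypothetical)
    MapState*            parent;    // the caller's map; NULL at the root

    MapState() : parent(NULL) {}

    // Pre-fix behavior: rewrite only the current map. A type sharpened in
    // an inlinee reverts to the weaker type in every enclosing scope.
    void replace_in_map_local(Node* oldn, Node* newn) {
      for (std::map<int, Node*>::iterator it = slots.begin();
           it != slots.end(); ++it) {
        if (it->second == oldn)  it->second = newn;
      }
    }

    // Post-fix behavior: walk the parent chain so every enclosing map sees
    // the replacement and the type information survives the return.
    void replace_in_map(Node* oldn, Node* newn) {
      for (MapState* m = this; m != NULL; m = m->parent) {
        m->replace_in_map_local(oldn, newn);
      }
    }
  };

This is why, throughout the new revision below, every CallGenerator::generate()
implementation gains a Parse* parent_parser argument: the generator needs a
handle on the enclosing parser whose maps may also have to be updated.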


  46   return TypeFunc::make(method());
  47 }
  48 
  49 //-----------------------------ParseGenerator---------------------------------
  50 // Internal class which handles all direct bytecode traversal.
  51 class ParseGenerator : public InlineCallGenerator {
  52 private:
  53   bool  _is_osr;
  54   float _expected_uses;
  55 
  56 public:
  57   ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
  58     : InlineCallGenerator(method)
  59   {
  60     _is_osr        = is_osr;
  61     _expected_uses = expected_uses;
  62     assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
  63   }
  64 
  65   virtual bool      is_parse() const           { return true; }
  66   virtual JVMState* generate(JVMState* jvms);
  67   int is_osr() { return _is_osr; }
  68 
  69 };
  70 
  71 JVMState* ParseGenerator::generate(JVMState* jvms) {
  72   Compile* C = Compile::current();
  73 
  74   if (is_osr()) {
  75     // The JVMS for an OSR has a single argument (see its TypeFunc).
  76     assert(jvms->depth() == 1, "no inline OSR");
  77   }
  78 
  79   if (C->failing()) {
  80     return NULL;  // bailing out of the compile; do not try to parse
  81   }
  82 
  83   Parse parser(jvms, method(), _expected_uses);
  84   // Grab signature for matching/allocation
  85 #ifdef ASSERT
  86   if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
  87     MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
  88     assert(C->env()->system_dictionary_modification_counter_changed(),
  89            "Must invalidate if TypeFuncs differ");
  90   }
  91 #endif
  92 
  93   GraphKit& exits = parser.exits();
  94 
  95   if (C->failing()) {
  96     while (exits.pop_exception_state() != NULL) ;
  97     return NULL;
  98   }
  99 
 100   assert(exits.jvms()->same_calls_as(jvms), "sanity");
 101 
 102   // Simply return the exit state of the parser,
 103   // augmented by any exceptional states.
 104   return exits.transfer_exceptions_into_jvms();
 105 }
 106 
 107 //---------------------------DirectCallGenerator------------------------------
 108 // Internal class which handles all out-of-line calls w/o receiver type checks.
 109 class DirectCallGenerator : public CallGenerator {
 110  private:
 111   CallStaticJavaNode* _call_node;
 112   // Force separate memory and I/O projections for the exceptional
 113   // paths to facilitate late inlining.
 114   bool                _separate_io_proj;
 115 
 116  public:
 117   DirectCallGenerator(ciMethod* method, bool separate_io_proj)
 118     : CallGenerator(method),
 119       _separate_io_proj(separate_io_proj)
 120   {
 121   }
 122   virtual JVMState* generate(JVMState* jvms);
 123 
 124   CallStaticJavaNode* call_node() const { return _call_node; }
 125 };
 126 
 127 JVMState* DirectCallGenerator::generate(JVMState* jvms) {
 128   GraphKit kit(jvms);
 129   bool is_static = method()->is_static();
 130   address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
 131                              : SharedRuntime::get_resolve_opt_virtual_call_stub();
 132 
 133   if (kit.C->log() != NULL) {
 134     kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
 135   }
 136 
 137   CallStaticJavaNode *call = new (kit.C) CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
 138   _call_node = call;  // Save the call node in case we need it later
 139   if (!is_static) {
 140     // Make an explicit receiver null_check as part of this call.
 141     // Since we share a map with the caller, his JVMS gets adjusted.
 142     kit.null_check_receiver_before_call(method());
 143     if (kit.stopped()) {
 144       // And dump it back to the caller, decorated with any exceptions:
 145       return kit.transfer_exceptions_into_jvms();
 146     }
 147     // Mark the call node as virtual, sort of:


 154   kit.set_arguments_for_java_call(call);
 155   kit.set_edges_for_java_call(call, false, _separate_io_proj);
 156   Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
 157   kit.push_node(method()->return_type()->basic_type(), ret);
 158   return kit.transfer_exceptions_into_jvms();
 159 }
 160 
 161 //--------------------------VirtualCallGenerator------------------------------
 162 // Internal class which handles all out-of-line calls checking receiver type.
 163 class VirtualCallGenerator : public CallGenerator {
 164 private:
 165   int _vtable_index;
 166 public:
 167   VirtualCallGenerator(ciMethod* method, int vtable_index)
 168     : CallGenerator(method), _vtable_index(vtable_index)
 169   {
 170     assert(vtable_index == Method::invalid_vtable_index ||
 171            vtable_index >= 0, "either invalid or usable");
 172   }
 173   virtual bool      is_virtual() const          { return true; }
 174   virtual JVMState* generate(JVMState* jvms);
 175 };
 176 
 177 JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
 178   GraphKit kit(jvms);
 179   Node* receiver = kit.argument(0);
 180 
 181   if (kit.C->log() != NULL) {
 182     kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
 183   }
 184 
 185   // If the receiver is a constant null, do not torture the system
 186   // by attempting to call through it.  The compile will proceed
 187   // correctly, but may bail out in final_graph_reshaping, because
 188   // the call instruction will have a seemingly deficient out-count.
 189   // (The bailout says something misleading about an "infinite loop".)
 190   if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
 191     kit.inc_sp(method()->arg_size());  // restore arguments
 192     kit.uncommon_trap(Deoptimization::Reason_null_check,
 193                       Deoptimization::Action_none,
 194                       NULL, "null receiver");
 195     return kit.transfer_exceptions_into_jvms();
 196   }
 197 


 259   assert(!m->is_method_handle_intrinsic(), "should be a direct call");
 260   return new VirtualCallGenerator(m, vtable_index);
 261 }
 262 
 263 // Allow inlining decisions to be delayed
 264 class LateInlineCallGenerator : public DirectCallGenerator {
 265  protected:
 266   CallGenerator* _inline_cg;
 267 
 268   virtual bool do_late_inline_check(JVMState* jvms) { return true; }
 269 
 270  public:
 271   LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
 272     DirectCallGenerator(method, true), _inline_cg(inline_cg) {}
 273 
 274   virtual bool      is_late_inline() const { return true; }
 275 
 276   // Convert the CallStaticJava into an inline
 277   virtual void do_late_inline();
 278 
 279   virtual JVMState* generate(JVMState* jvms) {
 280     Compile *C = Compile::current();
 281     C->print_inlining_skip(this);
 282 
 283     // Record that this call site should be revisited once the main
 284     // parse is finished.
 285     if (!is_mh_late_inline()) {
 286       C->add_late_inline(this);
 287     }
 288 
 289     // Emit the CallStaticJava and request separate projections so
 290     // that the late inlining logic can distinguish between fall
 291     // through and exceptional uses of the memory and io projections
 292     // as is done for allocations and macro expansion.
 293     return DirectCallGenerator::generate(jvms);
 294   }
 295 
 296   virtual void print_inlining_late(const char* msg) {
 297     CallNode* call = call_node();
 298     Compile* C = Compile::current();
 299     C->print_inlining_insert(this);
 300     C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
 301   }
 302 
 303 };
 304 
 305 void LateInlineCallGenerator::do_late_inline() {
 306   // Can't inline it
 307   CallStaticJavaNode* call = call_node();
 308   if (call == NULL || call->outcnt() == 0 ||
 309       call->in(0) == NULL || call->in(0)->is_top()) {
 310     return;
 311   }
 312 
 313   const TypeTuple *r = call->tf()->domain();


 372   CompileLog* log = C->log();
 373   if (log != NULL) {
 374     log->head("late_inline method='%d'", log->identify(method()));
 375     JVMState* p = jvms;
 376     while (p != NULL) {
 377       log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
 378       p = p->caller();
 379     }
 380     log->tail("late_inline");
 381   }
 382 
 383   // Set up default node notes to be picked up by the inlining
 384   Node_Notes* old_nn = C->default_node_notes();
 385   if (old_nn != NULL) {
 386     Node_Notes* entry_nn = old_nn->clone(C);
 387     entry_nn->set_jvms(jvms);
 388     C->set_default_node_notes(entry_nn);
 389   }
 390 
 391   // Now perform the inlining using the synthesized JVMState
 392   JVMState* new_jvms = _inline_cg->generate(jvms);
 393   if (new_jvms == NULL)  return;  // no change
 394   if (C->failing())      return;
 395 
 396   // Capture any exceptional control flow
 397   GraphKit kit(new_jvms);
 398 
 399   // Find the result object
 400   Node* result = C->top();
 401   int   result_size = method()->return_type()->size();
 402   if (result_size != 0 && !kit.stopped()) {
 403     result = (result_size == 1) ? kit.pop() : kit.pop_pair();
 404   }
 405 
 406   C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
 407   C->env()->notice_inlined_method(_inline_cg->method());
 408   C->set_inlining_progress(true);
 409 
 410   kit.replace_call(call, result);
 411 }
 412 
 413 
 414 CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
 415   return new LateInlineCallGenerator(method, inline_cg);
 416 }
 417 
 418 class LateInlineMHCallGenerator : public LateInlineCallGenerator {
 419   ciMethod* _caller;
 420   int _attempt;
 421   bool _input_not_const;
 422 
 423   virtual bool do_late_inline_check(JVMState* jvms);
 424   virtual bool already_attempted() const { return _attempt > 0; }
 425 
 426  public:
 427   LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
 428     LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}
 429 
 430   virtual bool is_mh_late_inline() const { return true; }
 431 
 432   virtual JVMState* generate(JVMState* jvms) {
 433     JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);
 434     if (_input_not_const) {
 435       // Inlining won't be possible, so there is no need to enqueue it right now.
 436       call_node()->set_generator(this);
 437     } else {
 438       Compile::current()->add_late_inline(this);
 439     }
 440     return new_jvms;
 441   }
 442 
 443   virtual void print_inlining_late(const char* msg) {
 444     if (!_input_not_const) return;
 445     LateInlineCallGenerator::print_inlining_late(msg);
 446   }
 447 };
 448 
 449 bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {
 450 
 451   CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);
 452 
 453   if (!_input_not_const) {


 460     Compile::current()->dec_number_of_mh_late_inlines();
 461     return true;
 462   }
 463 
 464   call_node()->set_generator(this);
 465   return false;
 466 }
 467 
 468 CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
 469   Compile::current()->inc_number_of_mh_late_inlines();
 470   CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
 471   return cg;
 472 }
 473 
 474 class LateInlineStringCallGenerator : public LateInlineCallGenerator {
 475 
 476  public:
 477   LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
 478     LateInlineCallGenerator(method, inline_cg) {}
 479 
 480   virtual JVMState* generate(JVMState* jvms) {
 481     Compile *C = Compile::current();
 482     C->print_inlining_skip(this);
 483 
 484     C->add_string_late_inline(this);
 485 
 486     JVMState* new_jvms =  DirectCallGenerator::generate(jvms);
 487     return new_jvms;
 488   }
 489 };
 490 
 491 CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
 492   return new LateInlineStringCallGenerator(method, inline_cg);
 493 }
 494 
 495 class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {
 496 
 497  public:
 498   LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
 499     LateInlineCallGenerator(method, inline_cg) {}
 500 
 501   virtual JVMState* generate(JVMState* jvms) {
 502     Compile *C = Compile::current();
 503     C->print_inlining_skip(this);
 504 
 505     C->add_boxing_late_inline(this);
 506 
 507     JVMState* new_jvms =  DirectCallGenerator::generate(jvms);
 508     return new_jvms;
 509   }
 510 };
 511 
 512 CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
 513   return new LateInlineBoxingCallGenerator(method, inline_cg);
 514 }
 515 
 516 //---------------------------WarmCallGenerator--------------------------------
 517 // Internal class which handles initial deferral of inlining decisions.
 518 class WarmCallGenerator : public CallGenerator {
 519   WarmCallInfo*   _call_info;
 520   CallGenerator*  _if_cold;
 521   CallGenerator*  _if_hot;
 522   bool            _is_virtual;   // caches virtuality of if_cold
 523   bool            _is_inline;    // caches inline-ness of if_hot
 524 
 525 public:
 526   WarmCallGenerator(WarmCallInfo* ci,
 527                     CallGenerator* if_cold,
 528                     CallGenerator* if_hot)
 529     : CallGenerator(if_cold->method())
 530   {
 531     assert(method() == if_hot->method(), "consistent choices");
 532     _call_info  = ci;
 533     _if_cold    = if_cold;
 534     _if_hot     = if_hot;
 535     _is_virtual = if_cold->is_virtual();
 536     _is_inline  = if_hot->is_inline();
 537   }
 538 
 539   virtual bool      is_inline() const           { return _is_inline; }
 540   virtual bool      is_virtual() const          { return _is_virtual; }
 541   virtual bool      is_deferred() const         { return true; }
 542 
 543   virtual JVMState* generate(JVMState* jvms);
 544 };
 545 
 546 
 547 CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
 548                                             CallGenerator* if_cold,
 549                                             CallGenerator* if_hot) {
 550   return new WarmCallGenerator(ci, if_cold, if_hot);
 551 }
 552 
 553 JVMState* WarmCallGenerator::generate(JVMState* jvms) {
 554   Compile* C = Compile::current();
 555   if (C->log() != NULL) {
 556     C->log()->elem("warm_call bci='%d'", jvms->bci());
 557   }
 558   jvms = _if_cold->generate(jvms);
 559   if (jvms != NULL) {
 560     Node* m = jvms->map()->control();
 561     if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
 562     if (m->is_Catch())     m = m->in(0);  else m = C->top();
 563     if (m->is_Proj())      m = m->in(0);  else m = C->top();
 564     if (m->is_CallJava()) {
 565       _call_info->set_call(m->as_Call());
 566       _call_info->set_hot_cg(_if_hot);
 567 #ifndef PRODUCT
 568       if (PrintOpto || PrintOptoInlining) {
 569         tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
 570         tty->print("WCI: ");
 571         _call_info->print();
 572       }
 573 #endif
 574       _call_info->set_heat(_call_info->compute_heat());
 575       C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
 576     }
 577   }
 578   return jvms;


 599   PredictedCallGenerator(ciKlass* predicted_receiver,
 600                          CallGenerator* if_missed,
 601                          CallGenerator* if_hit, float hit_prob)
 602     : CallGenerator(if_missed->method())
 603   {
 604     // The call profile data may predict the hit_prob as extreme as 0 or 1.
 605     // Remove the extreme values from the range.
 606     if (hit_prob > PROB_MAX)   hit_prob = PROB_MAX;
 607     if (hit_prob < PROB_MIN)   hit_prob = PROB_MIN;
 608 
 609     _predicted_receiver = predicted_receiver;
 610     _if_missed          = if_missed;
 611     _if_hit             = if_hit;
 612     _hit_prob           = hit_prob;
 613   }
 614 
 615   virtual bool      is_virtual()   const    { return true; }
 616   virtual bool      is_inline()    const    { return _if_hit->is_inline(); }
 617   virtual bool      is_deferred()  const    { return _if_hit->is_deferred(); }
 618 
 619   virtual JVMState* generate(JVMState* jvms);
 620 };
 621 
 622 
 623 CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
 624                                                  CallGenerator* if_missed,
 625                                                  CallGenerator* if_hit,
 626                                                  float hit_prob) {
 627   return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit, hit_prob);
 628 }
 629 
 630 
 631 JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
 632   GraphKit kit(jvms);
 633   PhaseGVN& gvn = kit.gvn();
 634   // We need an explicit receiver null_check before checking its type.
 635   // We share a map with the caller, so his JVMS gets adjusted.
 636   Node* receiver = kit.argument(0);
 637 
 638   CompileLog* log = kit.C->log();
 639   if (log != NULL) {
 640     log->elem("predicted_call bci='%d' klass='%d'",
 641               jvms->bci(), log->identify(_predicted_receiver));
 642   }
 643 
 644   receiver = kit.null_check_receiver_before_call(method());
 645   if (kit.stopped()) {
 646     return kit.transfer_exceptions_into_jvms();
 647   }
 648 
 649   Node* exact_receiver = receiver;  // will get updated in place...
 650   Node* slow_ctl = kit.type_check_receiver(receiver,
 651                                            _predicted_receiver, _hit_prob,
 652                                            &exact_receiver);
 653 
 654   SafePointNode* slow_map = NULL;
 655   JVMState* slow_jvms;
 656   { PreserveJVMState pjvms(&kit);
 657     kit.set_control(slow_ctl);
 658     if (!kit.stopped()) {
 659       slow_jvms = _if_missed->generate(kit.sync_jvms());
 660       if (kit.failing())
 661         return NULL;  // might happen because of NodeCountInliningCutoff
 662       assert(slow_jvms != NULL, "must be");
 663       kit.add_exception_states_from(slow_jvms);
 664       kit.set_map(slow_jvms->map());
 665       if (!kit.stopped())
 666         slow_map = kit.stop();
 667     }
 668   }
 669 
 670   if (kit.stopped()) {
 671     // The instance does not exactly match the desired type.
 672     kit.set_jvms(slow_jvms);
 673     return kit.transfer_exceptions_into_jvms();
 674   }
 675 
 676   // fall through if the instance exactly matches the desired type
 677   kit.replace_in_map(receiver, exact_receiver);
 678 
 679   // Make the hot call:
 680   JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
 681   if (new_jvms == NULL) {
 682     // Inline failed, so make a direct call.
 683     assert(_if_hit->is_inline(), "must have been a failed inline");
 684     CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
 685     new_jvms = cg->generate(kit.sync_jvms());
 686   }
 687   kit.add_exception_states_from(new_jvms);
 688   kit.set_jvms(new_jvms);
 689 
 690   // Need to merge slow and fast?
 691   if (slow_map == NULL) {
 692     // The fast path is the only path remaining.
 693     return kit.transfer_exceptions_into_jvms();
 694   }
 695 
 696   if (kit.stopped()) {
 697     // Inlined method threw an exception, so it's just the slow path after all.
 698     kit.set_jvms(slow_jvms);
 699     return kit.transfer_exceptions_into_jvms();
 700   }
 701 
 702   // Finish the diamond.
 703   kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
 704   RegionNode* region = new (kit.C) RegionNode(3);
 705   region->init_req(1, kit.control());


 857 
 858 //------------------------PredictedIntrinsicGenerator------------------------------
 859 // Internal class which handles all predicted Intrinsic calls.
 860 class PredictedIntrinsicGenerator : public CallGenerator {
 861   CallGenerator* _intrinsic;
 862   CallGenerator* _cg;
 863 
 864 public:
 865   PredictedIntrinsicGenerator(CallGenerator* intrinsic,
 866                               CallGenerator* cg)
 867     : CallGenerator(cg->method())
 868   {
 869     _intrinsic = intrinsic;
 870     _cg        = cg;
 871   }
 872 
 873   virtual bool      is_virtual()   const    { return true; }
 874   virtual bool      is_inlined()   const    { return true; }
 875   virtual bool      is_intrinsic() const    { return true; }
 876 
 877   virtual JVMState* generate(JVMState* jvms);
 878 };
 879 
 880 
 881 CallGenerator* CallGenerator::for_predicted_intrinsic(CallGenerator* intrinsic,
 882                                                       CallGenerator* cg) {
 883   return new PredictedIntrinsicGenerator(intrinsic, cg);
 884 }
 885 
 886 
 887 JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms) {
 888   GraphKit kit(jvms);
 889   PhaseGVN& gvn = kit.gvn();
 890 
 891   CompileLog* log = kit.C->log();
 892   if (log != NULL) {
 893     log->elem("predicted_intrinsic bci='%d' method='%d'",
 894               jvms->bci(), log->identify(method()));
 895   }
 896 
 897   Node* slow_ctl = _intrinsic->generate_predicate(kit.sync_jvms());
 898   if (kit.failing())
 899     return NULL;  // might happen because of NodeCountInliningCutoff
 900 
 901   SafePointNode* slow_map = NULL;
 902   JVMState* slow_jvms;
 903   if (slow_ctl != NULL) {
 904     PreserveJVMState pjvms(&kit);
 905     kit.set_control(slow_ctl);
 906     if (!kit.stopped()) {
 907       slow_jvms = _cg->generate(kit.sync_jvms());
 908       if (kit.failing())
 909         return NULL;  // might happen because of NodeCountInliningCutoff
 910       assert(slow_jvms != NULL, "must be");
 911       kit.add_exception_states_from(slow_jvms);
 912       kit.set_map(slow_jvms->map());
 913       if (!kit.stopped())
 914         slow_map = kit.stop();
 915     }
 916   }
 917 
 918   if (kit.stopped()) {
 919     // Predicate is always false.
 920     kit.set_jvms(slow_jvms);
 921     return kit.transfer_exceptions_into_jvms();
 922   }
 923 
 924   // Generate intrinsic code:
 925   JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
 926   if (new_jvms == NULL) {
 927     // Intrinsic failed, so use slow code or make a direct call.
 928     if (slow_map == NULL) {
 929       CallGenerator* cg = CallGenerator::for_direct_call(method());
 930       new_jvms = cg->generate(kit.sync_jvms());
 931     } else {
 932       kit.set_jvms(slow_jvms);
 933       return kit.transfer_exceptions_into_jvms();
 934     }
 935   }
 936   kit.add_exception_states_from(new_jvms);
 937   kit.set_jvms(new_jvms);
 938 
 939   // Need to merge slow and fast?
 940   if (slow_map == NULL) {
 941     // The fast path is the only path remaining.
 942     return kit.transfer_exceptions_into_jvms();
 943   }
 944 
 945   if (kit.stopped()) {
 946     // Intrinsic method threw an exception, so it's just the slow path after all.
 947     kit.set_jvms(slow_jvms);
 948     return kit.transfer_exceptions_into_jvms();
 949   }
 950 


 980 
 981 //-------------------------UncommonTrapCallGenerator-----------------------------
 982 // Internal class which replaces the call with an uncommon trap.
 983 class UncommonTrapCallGenerator : public CallGenerator {
 984   Deoptimization::DeoptReason _reason;
 985   Deoptimization::DeoptAction _action;
 986 
 987 public:
 988   UncommonTrapCallGenerator(ciMethod* m,
 989                             Deoptimization::DeoptReason reason,
 990                             Deoptimization::DeoptAction action)
 991     : CallGenerator(m)
 992   {
 993     _reason = reason;
 994     _action = action;
 995   }
 996 
 997   virtual bool      is_virtual() const          { ShouldNotReachHere(); return false; }
 998   virtual bool      is_trap() const             { return true; }
 999 
1000   virtual JVMState* generate(JVMState* jvms);
1001 };
1002 
1003 
1004 CallGenerator*
1005 CallGenerator::for_uncommon_trap(ciMethod* m,
1006                                  Deoptimization::DeoptReason reason,
1007                                  Deoptimization::DeoptAction action) {
1008   return new UncommonTrapCallGenerator(m, reason, action);
1009 }
1010 
1011 
1012 JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
1013   GraphKit kit(jvms);
1014   // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
1015   int nargs = method()->arg_size();
1016   kit.inc_sp(nargs);
1017   assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
1018   if (_reason == Deoptimization::Reason_class_check &&
1019       _action == Deoptimization::Action_maybe_recompile) {
1020     // Temp fix for 6529811
1021     // Don't allow uncommon_trap to override our decision to recompile in the event
1022     // of a class cast failure for a monomorphic call as it will never let us convert
1023     // the call to either bi-morphic or megamorphic and can lead to unc-trap loops
1024     bool keep_exact_action = true;
1025     kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
1026   } else {
1027     kit.uncommon_trap(_reason, _action);
1028   }
1029   return kit.transfer_exceptions_into_jvms();
1030 }
1031 
1032 // (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)


--- End of the old version of the file; the patched version (rev 5411/5413 applied) follows. ---

  46   return TypeFunc::make(method());
  47 }
  48 
  49 //-----------------------------ParseGenerator---------------------------------
  50 // Internal class which handles all direct bytecode traversal.
  51 class ParseGenerator : public InlineCallGenerator {
  52 private:
  53   bool  _is_osr;
  54   float _expected_uses;
  55 
  56 public:
  57   ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
  58     : InlineCallGenerator(method)
  59   {
  60     _is_osr        = is_osr;
  61     _expected_uses = expected_uses;
  62     assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
  63   }
  64 
  65   virtual bool      is_parse() const           { return true; }
  66   virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
  67   int is_osr() { return _is_osr; }
  68 
  69 };
  70 
  71 JVMState* ParseGenerator::generate(JVMState* jvms, Parse* parent_parser) {
  72   Compile* C = Compile::current();
  73 
  74   if (is_osr()) {
  75     // The JVMS for an OSR has a single argument (see its TypeFunc).
  76     assert(jvms->depth() == 1, "no inline OSR");
  77   }
  78 
  79   if (C->failing()) {
  80     return NULL;  // bailing out of the compile; do not try to parse
  81   }
  82 
  83   Parse parser(jvms, method(), _expected_uses, parent_parser);
  84   // Grab signature for matching/allocation
  85 #ifdef ASSERT
  86   if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
  87     MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
  88     assert(C->env()->system_dictionary_modification_counter_changed(),
  89            "Must invalidate if TypeFuncs differ");
  90   }
  91 #endif
  92 
  93   GraphKit& exits = parser.exits();
  94 
  95   if (C->failing()) {
  96     while (exits.pop_exception_state() != NULL) ;
  97     return NULL;
  98   }
  99 
 100   assert(exits.jvms()->same_calls_as(jvms), "sanity");
 101 
 102   // Simply return the exit state of the parser,
 103   // augmented by any exceptional states.
 104   return exits.transfer_exceptions_into_jvms();
 105 }
 106 
 107 //---------------------------DirectCallGenerator------------------------------
 108 // Internal class which handles all out-of-line calls w/o receiver type checks.
 109 class DirectCallGenerator : public CallGenerator {
 110  private:
 111   CallStaticJavaNode* _call_node;
 112   // Force separate memory and I/O projections for the exceptional
 113   // paths to facilitate late inlining.
 114   bool                _separate_io_proj;
 115 
 116  public:
 117   DirectCallGenerator(ciMethod* method, bool separate_io_proj)
 118     : CallGenerator(method),
 119       _separate_io_proj(separate_io_proj)
 120   {
 121   }
 122   virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
 123 
 124   CallStaticJavaNode* call_node() const { return _call_node; }
 125 };
 126 
 127 JVMState* DirectCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
 128   GraphKit kit(jvms);
 129   bool is_static = method()->is_static();
 130   address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
 131                              : SharedRuntime::get_resolve_opt_virtual_call_stub();
 132 
 133   if (kit.C->log() != NULL) {
 134     kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
 135   }
 136 
 137   CallStaticJavaNode *call = new (kit.C) CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
 138   _call_node = call;  // Save the call node in case we need it later
 139   if (!is_static) {
 140     // Make an explicit receiver null_check as part of this call.
 141     // Since we share a map with the caller, his JVMS gets adjusted.
 142     kit.null_check_receiver_before_call(method());
 143     if (kit.stopped()) {
 144       // And dump it back to the caller, decorated with any exceptions:
 145       return kit.transfer_exceptions_into_jvms();
 146     }
 147     // Mark the call node as virtual, sort of:


 154   kit.set_arguments_for_java_call(call);
 155   kit.set_edges_for_java_call(call, false, _separate_io_proj);
 156   Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
 157   kit.push_node(method()->return_type()->basic_type(), ret);
 158   return kit.transfer_exceptions_into_jvms();
 159 }
 160 
 161 //--------------------------VirtualCallGenerator------------------------------
 162 // Internal class which handles all out-of-line calls checking receiver type.
 163 class VirtualCallGenerator : public CallGenerator {
 164 private:
 165   int _vtable_index;
 166 public:
 167   VirtualCallGenerator(ciMethod* method, int vtable_index)
 168     : CallGenerator(method), _vtable_index(vtable_index)
 169   {
 170     assert(vtable_index == Method::invalid_vtable_index ||
 171            vtable_index >= 0, "either invalid or usable");
 172   }
 173   virtual bool      is_virtual() const          { return true; }
 174   virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
 175 };
 176 
 177 JVMState* VirtualCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
 178   GraphKit kit(jvms);
 179   Node* receiver = kit.argument(0);
 180 
 181   if (kit.C->log() != NULL) {
 182     kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
 183   }
 184 
 185   // If the receiver is a constant null, do not torture the system
 186   // by attempting to call through it.  The compile will proceed
 187   // correctly, but may bail out in final_graph_reshaping, because
 188   // the call instruction will have a seemingly deficient out-count.
 189   // (The bailout says something misleading about an "infinite loop".)
 190   if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
 191     kit.inc_sp(method()->arg_size());  // restore arguments
 192     kit.uncommon_trap(Deoptimization::Reason_null_check,
 193                       Deoptimization::Action_none,
 194                       NULL, "null receiver");
 195     return kit.transfer_exceptions_into_jvms();
 196   }
 197 


 259   assert(!m->is_method_handle_intrinsic(), "should be a direct call");
 260   return new VirtualCallGenerator(m, vtable_index);
 261 }
 262 
 263 // Allow inlining decisions to be delayed
 264 class LateInlineCallGenerator : public DirectCallGenerator {
 265  protected:
 266   CallGenerator* _inline_cg;
 267 
 268   virtual bool do_late_inline_check(JVMState* jvms) { return true; }
 269 
 270  public:
 271   LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
 272     DirectCallGenerator(method, true), _inline_cg(inline_cg) {}
 273 
 274   virtual bool      is_late_inline() const { return true; }
 275 
 276   // Convert the CallStaticJava into an inline
 277   virtual void do_late_inline();
 278 
 279   virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
 280     Compile *C = Compile::current();
 281     C->print_inlining_skip(this);
 282 
 283     // Record that this call site should be revisited once the main
 284     // parse is finished.
 285     if (!is_mh_late_inline()) {
 286       C->add_late_inline(this);
 287     }
 288 
 289     // Emit the CallStaticJava and request separate projections so
 290     // that the late inlining logic can distinguish between fall
 291     // through and exceptional uses of the memory and io projections
 292     // as is done for allocations and macro expansion.
 293     return DirectCallGenerator::generate(jvms, parent_parser);
 294   }
 295 
 296   virtual void print_inlining_late(const char* msg) {
 297     CallNode* call = call_node();
 298     Compile* C = Compile::current();
 299     C->print_inlining_insert(this);
 300     C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
 301   }
 302 
 303 };
 304 
 305 void LateInlineCallGenerator::do_late_inline() {
 306   // Can't inline it
 307   CallStaticJavaNode* call = call_node();
 308   if (call == NULL || call->outcnt() == 0 ||
 309       call->in(0) == NULL || call->in(0)->is_top()) {
 310     return;
 311   }
 312 
 313   const TypeTuple *r = call->tf()->domain();


 372   CompileLog* log = C->log();
 373   if (log != NULL) {
 374     log->head("late_inline method='%d'", log->identify(method()));
 375     JVMState* p = jvms;
 376     while (p != NULL) {
 377       log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
 378       p = p->caller();
 379     }
 380     log->tail("late_inline");
 381   }
 382 
 383   // Set up default node notes to be picked up by the inlining
 384   Node_Notes* old_nn = C->default_node_notes();
 385   if (old_nn != NULL) {
 386     Node_Notes* entry_nn = old_nn->clone(C);
 387     entry_nn->set_jvms(jvms);
 388     C->set_default_node_notes(entry_nn);
 389   }
 390 
 391   // Now perform the inlining using the synthesized JVMState
 392   JVMState* new_jvms = _inline_cg->generate(jvms, NULL);
 393   if (new_jvms == NULL)  return;  // no change
 394   if (C->failing())      return;
 395 
 396   // Capture any exceptional control flow
 397   GraphKit kit(new_jvms);
 398 
 399   // Find the result object
 400   Node* result = C->top();
 401   int   result_size = method()->return_type()->size();
 402   if (result_size != 0 && !kit.stopped()) {
 403     result = (result_size == 1) ? kit.pop() : kit.pop_pair();
 404   }
 405 
 406   C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
 407   C->env()->notice_inlined_method(_inline_cg->method());
 408   C->set_inlining_progress(true);
 409 
 410   kit.replace_call(call, result);
 411 }
 412 
 413 
 414 CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
 415   return new LateInlineCallGenerator(method, inline_cg);
 416 }
 417 
 418 class LateInlineMHCallGenerator : public LateInlineCallGenerator {
 419   ciMethod* _caller;
 420   int _attempt;
 421   bool _input_not_const;
 422 
 423   virtual bool do_late_inline_check(JVMState* jvms);
 424   virtual bool already_attempted() const { return _attempt > 0; }
 425 
 426  public:
 427   LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
 428     LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}
 429 
 430   virtual bool is_mh_late_inline() const { return true; }
 431 
 432   virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
 433     JVMState* new_jvms = LateInlineCallGenerator::generate(jvms, parent_parser);
 434     if (_input_not_const) {
 435       // Inlining won't be possible, so there is no need to enqueue it right now.
 436       call_node()->set_generator(this);
 437     } else {
 438       Compile::current()->add_late_inline(this);
 439     }
 440     return new_jvms;
 441   }
 442 
 443   virtual void print_inlining_late(const char* msg) {
 444     if (!_input_not_const) return;
 445     LateInlineCallGenerator::print_inlining_late(msg);
 446   }
 447 };
 448 
 449 bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {
 450 
 451   CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);
 452 
 453   if (!_input_not_const) {


 460     Compile::current()->dec_number_of_mh_late_inlines();
 461     return true;
 462   }
 463 
 464   call_node()->set_generator(this);
 465   return false;
 466 }
 467 
 468 CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
 469   Compile::current()->inc_number_of_mh_late_inlines();
 470   CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
 471   return cg;
 472 }
 473 
 474 class LateInlineStringCallGenerator : public LateInlineCallGenerator {
 475 
 476  public:
 477   LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
 478     LateInlineCallGenerator(method, inline_cg) {}
 479 
 480   virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
 481     Compile *C = Compile::current();
 482     C->print_inlining_skip(this);
 483 
 484     C->add_string_late_inline(this);
 485 
 486     JVMState* new_jvms =  DirectCallGenerator::generate(jvms, parent_parser);
 487     return new_jvms;
 488   }
 489 };
 490 
 491 CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
 492   return new LateInlineStringCallGenerator(method, inline_cg);
 493 }
 494 
 495 class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {
 496 
 497  public:
 498   LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
 499     LateInlineCallGenerator(method, inline_cg) {}
 500 
 501   virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
 502     Compile *C = Compile::current();
 503     C->print_inlining_skip(this);
 504 
 505     C->add_boxing_late_inline(this);
 506 
 507     JVMState* new_jvms =  DirectCallGenerator::generate(jvms, parent_parser);
 508     return new_jvms;
 509   }
 510 };
 511 
 512 CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
 513   return new LateInlineBoxingCallGenerator(method, inline_cg);
 514 }
 515 
 516 //---------------------------WarmCallGenerator--------------------------------
 517 // Internal class which handles initial deferral of inlining decisions.
 518 class WarmCallGenerator : public CallGenerator {
 519   WarmCallInfo*   _call_info;
 520   CallGenerator*  _if_cold;
 521   CallGenerator*  _if_hot;
 522   bool            _is_virtual;   // caches virtuality of if_cold
 523   bool            _is_inline;    // caches inline-ness of if_hot
 524 
 525 public:
 526   WarmCallGenerator(WarmCallInfo* ci,
 527                     CallGenerator* if_cold,
 528                     CallGenerator* if_hot)
 529     : CallGenerator(if_cold->method())
 530   {
 531     assert(method() == if_hot->method(), "consistent choices");
 532     _call_info  = ci;
 533     _if_cold    = if_cold;
 534     _if_hot     = if_hot;
 535     _is_virtual = if_cold->is_virtual();
 536     _is_inline  = if_hot->is_inline();
 537   }
 538 
 539   virtual bool      is_inline() const           { return _is_inline; }
 540   virtual bool      is_virtual() const          { return _is_virtual; }
 541   virtual bool      is_deferred() const         { return true; }
 542 
 543   virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
 544 };
 545 
 546 
 547 CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
 548                                             CallGenerator* if_cold,
 549                                             CallGenerator* if_hot) {
 550   return new WarmCallGenerator(ci, if_cold, if_hot);
 551 }
 552 
 553 JVMState* WarmCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
 554   Compile* C = Compile::current();
 555   if (C->log() != NULL) {
 556     C->log()->elem("warm_call bci='%d'", jvms->bci());
 557   }
 558   jvms = _if_cold->generate(jvms, parent_parser);
 559   if (jvms != NULL) {
 560     Node* m = jvms->map()->control();
 561     if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
 562     if (m->is_Catch())     m = m->in(0);  else m = C->top();
 563     if (m->is_Proj())      m = m->in(0);  else m = C->top();
 564     if (m->is_CallJava()) {
 565       _call_info->set_call(m->as_Call());
 566       _call_info->set_hot_cg(_if_hot);
 567 #ifndef PRODUCT
 568       if (PrintOpto || PrintOptoInlining) {
 569         tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
 570         tty->print("WCI: ");
 571         _call_info->print();
 572       }
 573 #endif
 574       _call_info->set_heat(_call_info->compute_heat());
 575       C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
 576     }
 577   }
 578   return jvms;


 599   PredictedCallGenerator(ciKlass* predicted_receiver,
 600                          CallGenerator* if_missed,
 601                          CallGenerator* if_hit, float hit_prob)
 602     : CallGenerator(if_missed->method())
 603   {
 604     // The call profile data may predict the hit_prob as extreme as 0 or 1.
 605     // Remove the extreme values from the range.
 606     if (hit_prob > PROB_MAX)   hit_prob = PROB_MAX;
 607     if (hit_prob < PROB_MIN)   hit_prob = PROB_MIN;
 608 
 609     _predicted_receiver = predicted_receiver;
 610     _if_missed          = if_missed;
 611     _if_hit             = if_hit;
 612     _hit_prob           = hit_prob;
 613   }
 614 
 615   virtual bool      is_virtual()   const    { return true; }
 616   virtual bool      is_inline()    const    { return _if_hit->is_inline(); }
 617   virtual bool      is_deferred()  const    { return _if_hit->is_deferred(); }
 618 
 619   virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
 620 };
 621 
 622 
 623 CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
 624                                                  CallGenerator* if_missed,
 625                                                  CallGenerator* if_hit,
 626                                                  float hit_prob) {
 627   return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit, hit_prob);
 628 }
 629 
 630 
 631 JVMState* PredictedCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
 632   GraphKit kit(jvms);
 633   PhaseGVN& gvn = kit.gvn();
 634   // We need an explicit receiver null_check before checking its type.
 635   // We share a map with the caller, so his JVMS gets adjusted.
 636   Node* receiver = kit.argument(0);
 637 
 638   CompileLog* log = kit.C->log();
 639   if (log != NULL) {
 640     log->elem("predicted_call bci='%d' klass='%d'",
 641               jvms->bci(), log->identify(_predicted_receiver));
 642   }
 643 
 644   receiver = kit.null_check_receiver_before_call(method());
 645   if (kit.stopped()) {
 646     return kit.transfer_exceptions_into_jvms();
 647   }
 648 
 649   Node* exact_receiver = receiver;  // will get updated in place...
 650   Node* slow_ctl = kit.type_check_receiver(receiver,
 651                                            _predicted_receiver, _hit_prob,
 652                                            &exact_receiver);
 653 
 654   SafePointNode* slow_map = NULL;
 655   JVMState* slow_jvms;
 656   { PreserveJVMState pjvms(&kit);
 657     kit.set_control(slow_ctl);
 658     if (!kit.stopped()) {
 659       slow_jvms = _if_missed->generate(kit.sync_jvms(), parent_parser);
 660       if (kit.failing())
 661         return NULL;  // might happen because of NodeCountInliningCutoff
 662       assert(slow_jvms != NULL, "must be");
 663       kit.add_exception_states_from(slow_jvms);
 664       kit.set_map(slow_jvms->map());
 665       if (!kit.stopped())
 666         slow_map = kit.stop();
 667     }
 668   }
 669 
 670   if (kit.stopped()) {
 671     // The instance does not exactly match the desired type.
 672     kit.set_jvms(slow_jvms);
 673     return kit.transfer_exceptions_into_jvms();
 674   }
 675 
 676   // fall through if the instance exactly matches the desired type
 677   kit.replace_in_map(receiver, exact_receiver);
 678 
 679   // Make the hot call:
 680   JVMState* new_jvms = _if_hit->generate(kit.sync_jvms(), parent_parser);
 681   if (new_jvms == NULL) {
 682     // Inline failed, so make a direct call.
 683     assert(_if_hit->is_inline(), "must have been a failed inline");
 684     CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
 685     new_jvms = cg->generate(kit.sync_jvms(), parent_parser);
 686   }
 687   kit.add_exception_states_from(new_jvms);
 688   kit.set_jvms(new_jvms);
 689 
 690   // Need to merge slow and fast?
 691   if (slow_map == NULL) {
 692     // The fast path is the only path remaining.
 693     return kit.transfer_exceptions_into_jvms();
 694   }
 695 
 696   if (kit.stopped()) {
 697     // Inlined method threw an exception, so it's just the slow path after all.
 698     kit.set_jvms(slow_jvms);
 699     return kit.transfer_exceptions_into_jvms();
 700   }
 701 
 702   // Finish the diamond.
 703   kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
 704   RegionNode* region = new (kit.C) RegionNode(3);
 705   region->init_req(1, kit.control());


 857 
 858 //------------------------PredictedIntrinsicGenerator------------------------------
 859 // Internal class which handles all predicted Intrinsic calls.
 860 class PredictedIntrinsicGenerator : public CallGenerator {
 861   CallGenerator* _intrinsic;
 862   CallGenerator* _cg;
 863 
 864 public:
 865   PredictedIntrinsicGenerator(CallGenerator* intrinsic,
 866                               CallGenerator* cg)
 867     : CallGenerator(cg->method())
 868   {
 869     _intrinsic = intrinsic;
 870     _cg        = cg;
 871   }
 872 
 873   virtual bool      is_virtual()   const    { return true; }
 874   virtual bool      is_inlined()   const    { return true; }
 875   virtual bool      is_intrinsic() const    { return true; }
 876 
 877   virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
 878 };
 879 
 880 
 881 CallGenerator* CallGenerator::for_predicted_intrinsic(CallGenerator* intrinsic,
 882                                                       CallGenerator* cg) {
 883   return new PredictedIntrinsicGenerator(intrinsic, cg);
 884 }
 885 
 886 
 887 JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms, Parse* parent_parser) {
 888   GraphKit kit(jvms);
 889   PhaseGVN& gvn = kit.gvn();
 890 
 891   CompileLog* log = kit.C->log();
 892   if (log != NULL) {
 893     log->elem("predicted_intrinsic bci='%d' method='%d'",
 894               jvms->bci(), log->identify(method()));
 895   }
 896 
 897   Node* slow_ctl = _intrinsic->generate_predicate(kit.sync_jvms());
 898   if (kit.failing())
 899     return NULL;  // might happen because of NodeCountInliningCutoff
 900 
 901   SafePointNode* slow_map = NULL;
 902   JVMState* slow_jvms;
 903   if (slow_ctl != NULL) {
 904     PreserveJVMState pjvms(&kit);
 905     kit.set_control(slow_ctl);
 906     if (!kit.stopped()) {
 907       slow_jvms = _cg->generate(kit.sync_jvms(), parent_parser);
 908       if (kit.failing())
 909         return NULL;  // might happen because of NodeCountInliningCutoff
 910       assert(slow_jvms != NULL, "must be");
 911       kit.add_exception_states_from(slow_jvms);
 912       kit.set_map(slow_jvms->map());
 913       if (!kit.stopped())
 914         slow_map = kit.stop();
 915     }
 916   }
 917 
 918   if (kit.stopped()) {
 919     // Predicate is always false.
 920     kit.set_jvms(slow_jvms);
 921     return kit.transfer_exceptions_into_jvms();
 922   }
 923 
 924   // Generate intrinsic code:
 925   JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms(), parent_parser);
 926   if (new_jvms == NULL) {
 927     // Intrinsic failed, so use slow code or make a direct call.
 928     if (slow_map == NULL) {
 929       CallGenerator* cg = CallGenerator::for_direct_call(method());
 930       new_jvms = cg->generate(kit.sync_jvms(), parent_parser);
 931     } else {
 932       kit.set_jvms(slow_jvms);
 933       return kit.transfer_exceptions_into_jvms();
 934     }
 935   }
 936   kit.add_exception_states_from(new_jvms);
 937   kit.set_jvms(new_jvms);
 938 
 939   // Need to merge slow and fast?
 940   if (slow_map == NULL) {
 941     // The fast path is the only path remaining.
 942     return kit.transfer_exceptions_into_jvms();
 943   }
 944 
 945   if (kit.stopped()) {
 946     // Intrinsic method threw an exception, so it's just the slow path after all.
 947     kit.set_jvms(slow_jvms);
 948     return kit.transfer_exceptions_into_jvms();
 949   }
 950 


 980 
 981 //-------------------------UncommonTrapCallGenerator-----------------------------
 982 // Internal class which replaces the call with an uncommon trap.
 983 class UncommonTrapCallGenerator : public CallGenerator {
 984   Deoptimization::DeoptReason _reason;
 985   Deoptimization::DeoptAction _action;
 986 
 987 public:
 988   UncommonTrapCallGenerator(ciMethod* m,
 989                             Deoptimization::DeoptReason reason,
 990                             Deoptimization::DeoptAction action)
 991     : CallGenerator(m)
 992   {
 993     _reason = reason;
 994     _action = action;
 995   }
 996 
 997   virtual bool      is_virtual() const          { ShouldNotReachHere(); return false; }
 998   virtual bool      is_trap() const             { return true; }
 999 
1000   virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
1001 };
1002 
1003 
1004 CallGenerator*
1005 CallGenerator::for_uncommon_trap(ciMethod* m,
1006                                  Deoptimization::DeoptReason reason,
1007                                  Deoptimization::DeoptAction action) {
1008   return new UncommonTrapCallGenerator(m, reason, action);
1009 }
1010 
1011 
1012 JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
1013   GraphKit kit(jvms);
1014   // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
1015   int nargs = method()->arg_size();
1016   kit.inc_sp(nargs);
1017   assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
1018   if (_reason == Deoptimization::Reason_class_check &&
1019       _action == Deoptimization::Action_maybe_recompile) {
1020     // Temp fix for 6529811
1021     // Don't allow uncommon_trap to override our decision to recompile in the event
1022     // of a class cast failure for a monomorphic call as it will never let us convert
1023     // the call to either bi-morphic or megamorphic and can lead to unc-trap loops
1024     bool keep_exact_action = true;
1025     kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
1026   } else {
1027     kit.uncommon_trap(_reason, _action);
1028   }
1029   return kit.transfer_exceptions_into_jvms();
1030 }
1031 
1032 // (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)


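In summary, the mechanical change repeated throughout the new revision is the
widened virtual signature. A condensed sketch of the interface, trimmed to the
relevant member (not the complete HotSpot class declaration):

  class Parse;      // parser of an enclosing compilation scope
  class JVMState;

  class CallGenerator {
   public:
    // rev 5411 and later: each generator is handed the parent parser so that
    // replace_in_map() can propagate node replacements to parent maps.
    virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) = 0;
    // previously: virtual JVMState* generate(JVMState* jvms) = 0;
  };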