< prev index next >

src/share/vm/opto/doCall.cpp

Print this page
rev 12109 : 8167656: Unstable MethodHandle inlining causing huge performance variations
Summary: Don't forbid inlining of method handles without type profile
Reviewed-by:


  46     outputStream* out = tty;
  47     if (!C->print_inlining()) {
  48       if (!PrintOpto && !PrintCompilation) {
  49         method->print_short_name();
  50         tty->cr();
  51       }
  52       CompileTask::print_inlining_tty(prof_method, depth, bci);
  53     } else {
  54       out = C->print_inlining_stream();
  55     }
  56     CompileTask::print_inline_indent(depth, out);
  57     out->print(" \\-> TypeProfile (%d/%d counts) = ", receiver_count, site_count);
  58     stringStream ss;
  59     prof_klass->name()->print_symbol_on(&ss);
  60     out->print("%s", ss.as_string());
  61     out->cr();
  62   }
  63 }
  64 
  65 CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool call_does_dispatch,
  66                                        JVMState* jvms, bool allow_inline,
  67                                        float prof_factor, ciKlass* speculative_receiver_type,
  68                                        bool allow_intrinsics, bool delayed_forbidden) {
  69   ciMethod*       caller   = jvms->method();
  70   int             bci      = jvms->bci();
  71   Bytecodes::Code bytecode = caller->java_code_at_bci(bci);
  72   guarantee(callee != NULL, "failed method resolution");
  73 
  74   // Dtrace currently doesn't work unless all calls are vanilla
  75   if (env()->dtrace_method_probes()) {
  76     allow_inline = false;
  77   }
  78 
  79   // Note: When we get profiling during stage-1 compiles, we want to pull
  80   // from more specific profile data which pertains to this inlining.
  81   // Right now, ignore the information in jvms->caller(), and do method[bci].
  82   ciCallProfile profile = caller->call_profile_at_bci(bci);
  83 
  84   // See how many times this site has been invoked.
  85   int site_count = profile.count();
  86   int receiver_count = -1;


 105       }
 106     }
 107     if (callee->is_method_handle_intrinsic()) {
 108       log->print(" method_handle_intrinsic='1'");
 109     }
 110     log->end_elem();
 111   }
 112 
 113   // Special case the handling of certain common, profitable library
 114   // methods.  If these methods are replaced with specialized code,
 115   // then we return it as the inlined version of the call.
 116   // We do this before the strict f.p. check below because the
 117   // intrinsics handle strict f.p. correctly.
 118   CallGenerator* cg_intrinsic = NULL;
 119   if (allow_inline && allow_intrinsics) {
 120     CallGenerator* cg = find_intrinsic(callee, call_does_dispatch);
 121     if (cg != NULL) {
 122       if (cg->is_predicated()) {
 123         // Code without intrinsic but, hopefully, inlined.
 124         CallGenerator* inline_cg = this->call_generator(callee,
 125               vtable_index, call_does_dispatch, jvms, allow_inline, prof_factor, speculative_receiver_type, false);
 126         if (inline_cg != NULL) {
 127           cg = CallGenerator::for_predicated_intrinsic(cg, inline_cg);
 128         }
 129       }
 130 
 131       // If intrinsic does the virtual dispatch, we try to use the type profile
 132       // first, and hopefully inline it as the regular virtual call below.
 133       // We will retry the intrinsic if nothing had claimed it afterwards.
 134       if (cg->does_virtual_dispatch()) {
 135         cg_intrinsic = cg;
 136         cg = NULL;
 137       } else {
 138         return cg;
 139       }
 140     }
 141   }
 142 
 143   // Do method handle calls.
 144   // NOTE: This must happen before normal inlining logic below since
 145   // MethodHandle.invoke* are native methods which obviously don't


 151   }
 152 
 153   // Do not inline strict fp into non-strict code, or the reverse
 154   if (caller->is_strict() ^ callee->is_strict()) {
 155     allow_inline = false;
 156   }
 157 
 158   // Attempt to inline...
 159   if (allow_inline) {
 160     // The profile data is only partly attributable to this caller,
 161     // scale back the call site information.
 162     float past_uses = jvms->method()->scale_count(site_count, prof_factor);
 163     // This is the number of times we expect the call code to be used.
 164     float expected_uses = past_uses;
 165 
 166     // Try inlining a bytecoded method:
 167     if (!call_does_dispatch) {
 168       InlineTree* ilt = InlineTree::find_subtree_from_root(this->ilt(), jvms->caller(), jvms->method());
 169       WarmCallInfo scratch_ci;
 170       bool should_delay = false;
 171       WarmCallInfo* ci = ilt->ok_to_inline(callee, jvms, profile, &scratch_ci, should_delay);
 172       assert(ci != &scratch_ci, "do not let this pointer escape");
 173       bool allow_inline   = (ci != NULL && !ci->is_cold());
 174       bool require_inline = (allow_inline && ci->is_hot());
 175 
 176       if (allow_inline) {
 177         CallGenerator* cg = CallGenerator::for_inline(callee, expected_uses);
 178 
 179         if (require_inline && cg != NULL) {
 180           // Delay the inlining of this method to give us the
 181           // opportunity to perform some high level optimizations
 182           // first.
 183           if (should_delay_string_inlining(callee, jvms)) {
 184             assert(!delayed_forbidden, "strange");
 185             return CallGenerator::for_string_late_inline(callee, cg);
 186           } else if (should_delay_boxing_inlining(callee, jvms)) {
 187             assert(!delayed_forbidden, "strange");
 188             return CallGenerator::for_boxing_late_inline(callee, cg);
 189           } else if ((should_delay || AlwaysIncrementalInline) && !delayed_forbidden) {
 190             return CallGenerator::for_late_inline(callee, cg);
 191           }
 192         }
 193         if (cg == NULL || should_delay) {
 194           // Fall through.
 195         } else if (require_inline || !InlineWarmCalls) {
 196           return cg;
 197         } else {
 198           CallGenerator* cold_cg = call_generator(callee, vtable_index, call_does_dispatch, jvms, false, prof_factor);
 199           return CallGenerator::for_warm_call(ci, cold_cg, cg);
 200         }
 201       }
 202     }
 203 
 204     // Try using the type profile.
 205     if (call_does_dispatch && site_count > 0 && receiver_count > 0) {
 206       // The major receiver's count >= TypeProfileMajorReceiverPercent of site_count.
 207       bool have_major_receiver = (100.*profile.receiver_prob(0) >= (float)TypeProfileMajorReceiverPercent);
 208       ciMethod* receiver_method = NULL;
 209 
 210       int morphism = profile.morphism();
 211       if (speculative_receiver_type != NULL) {
 212         if (!too_many_traps(caller, bci, Deoptimization::Reason_speculate_class_check)) {
 213           // We have a speculative type, we should be able to resolve
 214           // the call. We do that before looking at the profiling at
 215           // this invoke because it may lead to bimorphic inlining which
 216           // a speculative type should help us avoid.
 217           receiver_method = callee->resolve_invoke(jvms->method()->holder(),
 218                                                    speculative_receiver_type);


 221           } else {
 222             morphism = 1;
 223           }
 224         } else {
 225           // speculation failed before. Use profiling at the call
 226           // (could allow bimorphic inlining for instance).
 227           speculative_receiver_type = NULL;
 228         }
 229       }
 230       if (receiver_method == NULL &&
 231           (have_major_receiver || morphism == 1 ||
 232            (morphism == 2 && UseBimorphicInlining))) {
 233         // receiver_method = profile.method();
 234         // Profiles do not suggest methods now.  Look it up in the major receiver.
 235         receiver_method = callee->resolve_invoke(jvms->method()->holder(),
 236                                                       profile.receiver(0));
 237       }
 238       if (receiver_method != NULL) {
 239         // The single majority receiver sufficiently outweighs the minority.
 240         CallGenerator* hit_cg = this->call_generator(receiver_method,
 241               vtable_index, !call_does_dispatch, jvms, allow_inline, prof_factor);
 242         if (hit_cg != NULL) {
 243           // Look up second receiver.
 244           CallGenerator* next_hit_cg = NULL;
 245           ciMethod* next_receiver_method = NULL;
 246           if (morphism == 2 && UseBimorphicInlining) {
 247             next_receiver_method = callee->resolve_invoke(jvms->method()->holder(),
 248                                                                profile.receiver(1));
 249             if (next_receiver_method != NULL) {
 250               next_hit_cg = this->call_generator(next_receiver_method,
 251                                   vtable_index, !call_does_dispatch, jvms,
 252                                   allow_inline, prof_factor);
 253               if (next_hit_cg != NULL && !next_hit_cg->is_inline() &&
 254                   have_major_receiver && UseOnlyInlinedBimorphic) {
 255                   // Skip if we can't inline second receiver's method
 256                   next_hit_cg = NULL;
 257               }
 258             }
 259           }
 260           CallGenerator* miss_cg;
 261           Deoptimization::DeoptReason reason = morphism == 2 ?
 262             Deoptimization::Reason_bimorphic : Deoptimization::reason_class_check(speculative_receiver_type != NULL);
 263           if ((morphism == 1 || (morphism == 2 && next_hit_cg != NULL)) &&
 264               !too_many_traps(caller, bci, reason)
 265              ) {
 266             // Generate uncommon trap for class check failure path
 267             // in case of monomorphic or bimorphic virtual call site.
 268             miss_cg = CallGenerator::for_uncommon_trap(callee, reason,
 269                         Deoptimization::Action_maybe_recompile);
 270           } else {
 271             // Generate virtual call for class check failure path
 272             // in case of polymorphic virtual call site.


 583     // on array types won't be either.
 584     callee = C->optimize_virtual_call(method(), bci(), klass, holder, orig_callee,
 585                                       receiver_type, is_virtual,
 586                                       call_does_dispatch, vtable_index);  // out-parameters
 587     speculative_receiver_type = receiver_type != NULL ? receiver_type->speculative_type() : NULL;
 588   }
 589 
 590   // Note:  It's OK to try to inline a virtual call.
 591   // The call generator will not attempt to inline a polymorphic call
 592   // unless it knows how to optimize the receiver dispatch.
 593   bool try_inline = (C->do_inlining() || InlineAccessors);
 594 
 595   // ---------------------
 596   dec_sp(nargs);              // Temporarily pop args for JVM state of call
 597   JVMState* jvms = sync_jvms();
 598 
 599   // ---------------------
 600   // Decide call tactic.
 601   // This call checks with CHA, the interpreter profile, intrinsics table, etc.
 602   // It decides whether inlining is desirable or not.
 603   CallGenerator* cg = C->call_generator(callee, vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type);
 604 
 605   // NOTE:  Don't use orig_callee and callee after this point!  Use cg->method() instead.
 606   orig_callee = callee = NULL;
 607 
 608   // ---------------------
 609   // Round double arguments before call
 610   round_double_arguments(cg->method());
 611 
 612   // Feed profiling data for arguments to the type system so it can
 613   // propagate it as speculative types
 614   record_profiled_arguments_for_speculation(cg->method(), bc());
 615 
 616 #ifndef PRODUCT
 617   // bump global counters for calls
 618   count_compiled_calls(/*at_method_entry*/ false, cg->is_inline());
 619 
 620   // Record first part of parsing work for this call
 621   parse_histogram()->record_change();
 622 #endif // not PRODUCT
 623 


 634     receiver = record_profiled_receiver_for_speculation(receiver);
 635   }
 636 
 637   // Bump method data counters (We profile *before* the call is made
 638   // because exceptions don't return to the call site.)
 639   profile_call(receiver);
 640 
 641   JVMState* new_jvms = cg->generate(jvms);
 642   if (new_jvms == NULL) {
 643     // When inlining attempt fails (e.g., too many arguments),
 644     // it may contaminate the current compile state, making it
 645     // impossible to pull back and try again.  Once we call
 646     // cg->generate(), we are committed.  If it fails, the whole
 647     // compilation task is compromised.
 648     if (failing())  return;
 649 
 650     // This can happen if a library intrinsic is available, but refuses
 651     // the call site, perhaps because it did not match a pattern the
 652     // intrinsic was expecting to optimize. Should always be possible to
 653     // get a normal java call that may inline in that case
 654     cg = C->call_generator(cg->method(), vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type, /* allow_intrinsics= */ false);
 655     new_jvms = cg->generate(jvms);
 656     if (new_jvms == NULL) {
 657       guarantee(failing(), "call failed to generate:  calls should work");
 658       return;
 659     }
 660   }
 661 
 662   if (cg->is_inline()) {
 663     // Accumulate has_loops estimate
 664     C->set_has_loops(C->has_loops() || cg->method()->has_loops());
 665     C->env()->notice_inlined_method(cg->method());
 666   }
 667 
 668   // Reset parser state from [new_]jvms, which now carries results of the call.
 669   // Return value (if any) is already pushed on the stack by the cg.
 670   add_exception_states_from(new_jvms);
 671   if (new_jvms->map()->control() == top()) {
 672     stop_and_kill_map();
 673   } else {
 674     assert(new_jvms->same_calls_as(jvms), "method/bci left unchanged");




  46     outputStream* out = tty;
  47     if (!C->print_inlining()) {
  48       if (!PrintOpto && !PrintCompilation) {
  49         method->print_short_name();
  50         tty->cr();
  51       }
  52       CompileTask::print_inlining_tty(prof_method, depth, bci);
  53     } else {
  54       out = C->print_inlining_stream();
  55     }
  56     CompileTask::print_inline_indent(depth, out);
  57     out->print(" \\-> TypeProfile (%d/%d counts) = ", receiver_count, site_count);
  58     stringStream ss;
  59     prof_klass->name()->print_symbol_on(&ss);
  60     out->print("%s", ss.as_string());
  61     out->cr();
  62   }
  63 }
  64 
  65 CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool call_does_dispatch,
  66                                        JVMState* jvms, bool allow_inline, bool is_mh_inline,
  67                                        float prof_factor, ciKlass* speculative_receiver_type,
  68                                        bool allow_intrinsics, bool delayed_forbidden) {
  69   ciMethod*       caller   = jvms->method();
  70   int             bci      = jvms->bci();
  71   Bytecodes::Code bytecode = caller->java_code_at_bci(bci);
  72   guarantee(callee != NULL, "failed method resolution");
  73 
  74   // Dtrace currently doesn't work unless all calls are vanilla
  75   if (env()->dtrace_method_probes()) {
  76     allow_inline = false;
  77   }
  78 
  79   // Note: When we get profiling during stage-1 compiles, we want to pull
  80   // from more specific profile data which pertains to this inlining.
  81   // Right now, ignore the information in jvms->caller(), and do method[bci].
  82   ciCallProfile profile = caller->call_profile_at_bci(bci);
  83 
  84   // See how many times this site has been invoked.
  85   int site_count = profile.count();
  86   int receiver_count = -1;


 105       }
 106     }
 107     if (callee->is_method_handle_intrinsic()) {
 108       log->print(" method_handle_intrinsic='1'");
 109     }
 110     log->end_elem();
 111   }
 112 
 113   // Special case the handling of certain common, profitable library
 114   // methods.  If these methods are replaced with specialized code,
 115   // then we return it as the inlined version of the call.
 116   // We do this before the strict f.p. check below because the
 117   // intrinsics handle strict f.p. correctly.
 118   CallGenerator* cg_intrinsic = NULL;
 119   if (allow_inline && allow_intrinsics) {
 120     CallGenerator* cg = find_intrinsic(callee, call_does_dispatch);
 121     if (cg != NULL) {
 122       if (cg->is_predicated()) {
 123         // Code without intrinsic but, hopefully, inlined.
 124         CallGenerator* inline_cg = this->call_generator(callee,
 125               vtable_index, call_does_dispatch, jvms, allow_inline, false /* is_mh_inline */, prof_factor, speculative_receiver_type, false);
 126         if (inline_cg != NULL) {
 127           cg = CallGenerator::for_predicated_intrinsic(cg, inline_cg);
 128         }
 129       }
 130 
 131       // If intrinsic does the virtual dispatch, we try to use the type profile
 132       // first, and hopefully inline it as the regular virtual call below.
 133       // We will retry the intrinsic if nothing had claimed it afterwards.
 134       if (cg->does_virtual_dispatch()) {
 135         cg_intrinsic = cg;
 136         cg = NULL;
 137       } else {
 138         return cg;
 139       }
 140     }
 141   }
 142 
 143   // Do method handle calls.
 144   // NOTE: This must happen before normal inlining logic below since
 145   // MethodHandle.invoke* are native methods which obviously don't


 151   }
 152 
 153   // Do not inline strict fp into non-strict code, or the reverse
 154   if (caller->is_strict() ^ callee->is_strict()) {
 155     allow_inline = false;
 156   }
 157 
 158   // Attempt to inline...
 159   if (allow_inline) {
 160     // The profile data is only partly attributable to this caller,
 161     // scale back the call site information.
 162     float past_uses = jvms->method()->scale_count(site_count, prof_factor);
 163     // This is the number of times we expect the call code to be used.
 164     float expected_uses = past_uses;
 165 
 166     // Try inlining a bytecoded method:
 167     if (!call_does_dispatch) {
 168       InlineTree* ilt = InlineTree::find_subtree_from_root(this->ilt(), jvms->caller(), jvms->method());
 169       WarmCallInfo scratch_ci;
 170       bool should_delay = false;
 171       WarmCallInfo* ci = ilt->ok_to_inline(callee, jvms, profile, &scratch_ci, should_delay, is_mh_inline);
 172       assert(ci != &scratch_ci, "do not let this pointer escape");
 173       bool allow_inline   = (ci != NULL && !ci->is_cold());
 174       bool require_inline = (allow_inline && ci->is_hot());
 175 
 176       if (allow_inline) {
 177         CallGenerator* cg = CallGenerator::for_inline(callee, expected_uses);
 178 
 179         if (require_inline && cg != NULL) {
 180           // Delay the inlining of this method to give us the
 181           // opportunity to perform some high level optimizations
 182           // first.
 183           if (should_delay_string_inlining(callee, jvms)) {
 184             assert(!delayed_forbidden, "strange");
 185             return CallGenerator::for_string_late_inline(callee, cg);
 186           } else if (should_delay_boxing_inlining(callee, jvms)) {
 187             assert(!delayed_forbidden, "strange");
 188             return CallGenerator::for_boxing_late_inline(callee, cg);
 189           } else if ((should_delay || AlwaysIncrementalInline) && !delayed_forbidden) {
 190             return CallGenerator::for_late_inline(callee, cg);
 191           }
 192         }
 193         if (cg == NULL || should_delay) {
 194           // Fall through.
 195         } else if (require_inline || !InlineWarmCalls) {
 196           return cg;
 197         } else {
 198           CallGenerator* cold_cg = call_generator(callee, vtable_index, call_does_dispatch, jvms, false /* allow_inline */, is_mh_inline, prof_factor);
 199           return CallGenerator::for_warm_call(ci, cold_cg, cg);
 200         }
 201       }
 202     }
 203 
 204     // Try using the type profile.
 205     if (call_does_dispatch && site_count > 0 && receiver_count > 0) {
 206       // The major receiver's count >= TypeProfileMajorReceiverPercent of site_count.
 207       bool have_major_receiver = (100.*profile.receiver_prob(0) >= (float)TypeProfileMajorReceiverPercent);
 208       ciMethod* receiver_method = NULL;
 209 
 210       int morphism = profile.morphism();
 211       if (speculative_receiver_type != NULL) {
 212         if (!too_many_traps(caller, bci, Deoptimization::Reason_speculate_class_check)) {
 213           // We have a speculative type, we should be able to resolve
 214           // the call. We do that before looking at the profiling at
 215           // this invoke because it may lead to bimorphic inlining which
 216           // a speculative type should help us avoid.
 217           receiver_method = callee->resolve_invoke(jvms->method()->holder(),
 218                                                    speculative_receiver_type);


 221           } else {
 222             morphism = 1;
 223           }
 224         } else {
 225           // speculation failed before. Use profiling at the call
 226           // (could allow bimorphic inlining for instance).
 227           speculative_receiver_type = NULL;
 228         }
 229       }
 230       if (receiver_method == NULL &&
 231           (have_major_receiver || morphism == 1 ||
 232            (morphism == 2 && UseBimorphicInlining))) {
 233         // receiver_method = profile.method();
 234         // Profiles do not suggest methods now.  Look it up in the major receiver.
 235         receiver_method = callee->resolve_invoke(jvms->method()->holder(),
 236                                                       profile.receiver(0));
 237       }
 238       if (receiver_method != NULL) {
 239         // The single majority receiver sufficiently outweighs the minority.
 240         CallGenerator* hit_cg = this->call_generator(receiver_method,
 241               vtable_index, !call_does_dispatch, jvms, allow_inline, false /* is_mh_inline */, prof_factor);
 242         if (hit_cg != NULL) {
 243           // Look up second receiver.
 244           CallGenerator* next_hit_cg = NULL;
 245           ciMethod* next_receiver_method = NULL;
 246           if (morphism == 2 && UseBimorphicInlining) {
 247             next_receiver_method = callee->resolve_invoke(jvms->method()->holder(),
 248                                                                profile.receiver(1));
 249             if (next_receiver_method != NULL) {
 250               next_hit_cg = this->call_generator(next_receiver_method,
 251                                   vtable_index, !call_does_dispatch, jvms,
 252                                   allow_inline, false /* is_mh_inline */, prof_factor);
 253               if (next_hit_cg != NULL && !next_hit_cg->is_inline() &&
 254                   have_major_receiver && UseOnlyInlinedBimorphic) {
 255                   // Skip if we can't inline second receiver's method
 256                   next_hit_cg = NULL;
 257               }
 258             }
 259           }
 260           CallGenerator* miss_cg;
 261           Deoptimization::DeoptReason reason = morphism == 2 ?
 262             Deoptimization::Reason_bimorphic : Deoptimization::reason_class_check(speculative_receiver_type != NULL);
 263           if ((morphism == 1 || (morphism == 2 && next_hit_cg != NULL)) &&
 264               !too_many_traps(caller, bci, reason)
 265              ) {
 266             // Generate uncommon trap for class check failure path
 267             // in case of monomorphic or bimorphic virtual call site.
 268             miss_cg = CallGenerator::for_uncommon_trap(callee, reason,
 269                         Deoptimization::Action_maybe_recompile);
 270           } else {
 271             // Generate virtual call for class check failure path
 272             // in case of polymorphic virtual call site.


 583     // on array types won't be either.
 584     callee = C->optimize_virtual_call(method(), bci(), klass, holder, orig_callee,
 585                                       receiver_type, is_virtual,
 586                                       call_does_dispatch, vtable_index);  // out-parameters
 587     speculative_receiver_type = receiver_type != NULL ? receiver_type->speculative_type() : NULL;
 588   }
 589 
 590   // Note:  It's OK to try to inline a virtual call.
 591   // The call generator will not attempt to inline a polymorphic call
 592   // unless it knows how to optimize the receiver dispatch.
 593   bool try_inline = (C->do_inlining() || InlineAccessors);
 594 
 595   // ---------------------
 596   dec_sp(nargs);              // Temporarily pop args for JVM state of call
 597   JVMState* jvms = sync_jvms();
 598 
 599   // ---------------------
 600   // Decide call tactic.
 601   // This call checks with CHA, the interpreter profile, intrinsics table, etc.
 602   // It decides whether inlining is desirable or not.
 603   CallGenerator* cg = C->call_generator(callee, vtable_index, call_does_dispatch, jvms, try_inline, false /* is_mh_inline */, prof_factor(), speculative_receiver_type);
 604 
 605   // NOTE:  Don't use orig_callee and callee after this point!  Use cg->method() instead.
 606   orig_callee = callee = NULL;
 607 
 608   // ---------------------
 609   // Round double arguments before call
 610   round_double_arguments(cg->method());
 611 
 612   // Feed profiling data for arguments to the type system so it can
 613   // propagate it as speculative types
 614   record_profiled_arguments_for_speculation(cg->method(), bc());
 615 
 616 #ifndef PRODUCT
 617   // bump global counters for calls
 618   count_compiled_calls(/*at_method_entry*/ false, cg->is_inline());
 619 
 620   // Record first part of parsing work for this call
 621   parse_histogram()->record_change();
 622 #endif // not PRODUCT
 623 


 634     receiver = record_profiled_receiver_for_speculation(receiver);
 635   }
 636 
 637   // Bump method data counters (We profile *before* the call is made
 638   // because exceptions don't return to the call site.)
 639   profile_call(receiver);
 640 
 641   JVMState* new_jvms = cg->generate(jvms);
 642   if (new_jvms == NULL) {
 643     // When inlining attempt fails (e.g., too many arguments),
 644     // it may contaminate the current compile state, making it
 645     // impossible to pull back and try again.  Once we call
 646     // cg->generate(), we are committed.  If it fails, the whole
 647     // compilation task is compromised.
 648     if (failing())  return;
 649 
 650     // This can happen if a library intrinsic is available, but refuses
 651     // the call site, perhaps because it did not match a pattern the
 652     // intrinsic was expecting to optimize. Should always be possible to
 653     // get a normal java call that may inline in that case
 654     cg = C->call_generator(cg->method(), vtable_index, call_does_dispatch, jvms, try_inline, false /* is_mh_inline */, prof_factor(), speculative_receiver_type, /* allow_intrinsics= */ false);
 655     new_jvms = cg->generate(jvms);
 656     if (new_jvms == NULL) {
 657       guarantee(failing(), "call failed to generate:  calls should work");
 658       return;
 659     }
 660   }
 661 
 662   if (cg->is_inline()) {
 663     // Accumulate has_loops estimate
 664     C->set_has_loops(C->has_loops() || cg->method()->has_loops());
 665     C->env()->notice_inlined_method(cg->method());
 666   }
 667 
 668   // Reset parser state from [new_]jvms, which now carries results of the call.
 669   // Return value (if any) is already pushed on the stack by the cg.
 670   add_exception_states_from(new_jvms);
 671   if (new_jvms->map()->control() == top()) {
 672     stop_and_kill_map();
 673   } else {
 674     assert(new_jvms->same_calls_as(jvms), "method/bci left unchanged");


< prev index next >