src/share/vm/opto/doCall.cpp

--- old/src/share/vm/opto/doCall.cpp

  44 #ifndef PRODUCT
  45 void trace_type_profile(ciMethod *method, int depth, int bci, ciMethod *prof_method, ciKlass *prof_klass, int site_count, int receiver_count) {
  46   if (TraceTypeProfile || PrintInlining || PrintOptoInlining) {
  47     if (!PrintInlining) {
  48       if (!PrintOpto && !PrintCompilation) {
  49         method->print_short_name();
  50         tty->cr();
  51       }
  52       CompileTask::print_inlining(prof_method, depth, bci);
  53     }
  54     CompileTask::print_inline_indent(depth);
  55     tty->print(" \\-> TypeProfile (%d/%d counts) = ", receiver_count, site_count);
  56     prof_klass->name()->print_symbol();
  57     tty->cr();
  58   }
  59 }
  60 #endif
  61 
  62 CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index, bool call_is_virtual,
  63                                        JVMState* jvms, bool allow_inline,
  64                                        float prof_factor) {
  65   ciMethod*       caller   = jvms->method();
  66   int             bci      = jvms->bci();
  67   Bytecodes::Code bytecode = caller->java_code_at_bci(bci);
  68   guarantee(call_method != NULL, "failed method resolution");
  69 
  70   // Dtrace currently doesn't work unless all calls are vanilla
  71   if (env()->dtrace_method_probes()) {
  72     allow_inline = false;
  73   }
  74 
  75   // Note: When we get profiling during stage-1 compiles, we want to pull
  76   // from more specific profile data which pertains to this inlining.
  77   // Right now, ignore the information in jvms->caller(), and do method[bci].
  78   ciCallProfile profile = caller->call_profile_at_bci(bci);
  79 
  80   // See how many times this site has been invoked.
  81   int site_count = profile.count();
  82   int receiver_count = -1;
  83   if (call_is_virtual && UseTypeProfile && profile.has_receiver(0)) {
  84     // Receivers in the profile structure are ordered by call counts


  91     int rid = (receiver_count >= 0)? log->identify(profile.receiver(0)): -1;
  92     int r2id = (rid != -1 && profile.has_receiver(1))? log->identify(profile.receiver(1)):-1;
  93     log->begin_elem("call method='%d' count='%d' prof_factor='%g'",
  94                     log->identify(call_method), site_count, prof_factor);
  95     if (call_is_virtual)  log->print(" virtual='1'");
  96     if (allow_inline)     log->print(" inline='1'");
  97     if (receiver_count >= 0) {
  98       log->print(" receiver='%d' receiver_count='%d'", rid, receiver_count);
  99       if (profile.has_receiver(1)) {
 100         log->print(" receiver2='%d' receiver2_count='%d'", r2id, profile.receiver_count(1));
 101       }
 102     }
 103     log->end_elem();
 104   }
 105 
 106   // Special case the handling of certain common, profitable library
 107   // methods.  If these methods are replaced with specialized code,
 108   // then we return it as the inlined version of the call.
 109   // We do this before the strict f.p. check below because the
 110   // intrinsics handle strict f.p. correctly.
 111   if (allow_inline) {
 112     CallGenerator* cg = find_intrinsic(call_method, call_is_virtual);
 113     if (cg != NULL)  return cg;
 114   }
 115 
 116   // Do method handle calls.
 117   // NOTE: This must happen before normal inlining logic below since
 118   // MethodHandle.invoke* are native methods which obviously don't
 119   // have bytecodes and so normal inlining fails.
 120   if (call_method->is_method_handle_invoke()) {
 121     if (bytecode != Bytecodes::_invokedynamic) {
 122       GraphKit kit(jvms);
 123       Node* method_handle = kit.argument(0);
 124       return CallGenerator::for_method_handle_call(method_handle, jvms, caller, call_method, profile);
 125     }
 126     else {
 127       return CallGenerator::for_invokedynamic_call(jvms, caller, call_method, profile);
 128     }
 129   }
 130 
 131   // Do not inline strict fp into non-strict code, or the reverse


 438 #endif // not PRODUCT
 439 
 440   assert(jvms == this->jvms(), "still operating on the right JVMS");
 441   assert(jvms_in_sync(),       "jvms must carry full info into CG");
 442 
 443   // save across call, for a subsequent cast_not_null.
 444   Node* receiver = has_receiver ? argument(0) : NULL;
 445 
 446   // Bump method data counters (We profile *before* the call is made
 447   // because exceptions don't return to the call site.)
 448   profile_call(receiver);
 449 
 450   JVMState* new_jvms;
 451   if ((new_jvms = cg->generate(jvms)) == NULL) {
 452     // When inlining attempt fails (e.g., too many arguments),
 453     // it may contaminate the current compile state, making it
 454     // impossible to pull back and try again.  Once we call
 455     // cg->generate(), we are committed.  If it fails, the whole
 456     // compilation task is compromised.
 457     if (failing())  return;
 458 #ifndef PRODUCT
 459     if (PrintOpto || PrintOptoInlining || PrintInlining) {
 460       // Only one fall-back, so if an intrinsic fails, ignore any bytecodes.
 461       if (cg->is_intrinsic() && call_method->code_size() > 0) {
 462         tty->print("Bailed out of intrinsic, will not inline: ");
 463         call_method->print_name(); tty->cr();
 464       }
 465     }
 466 #endif
 467     // This can happen if a library intrinsic is available, but refuses
 468     // the call site, perhaps because it did not match a pattern the
 469     // intrinsic was expecting to optimize.  The fallback position is
 470     // to call out-of-line.
 471     try_inline = false;  // Inline tactic bailed out.
 472     cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms, try_inline, prof_factor());
 473     if ((new_jvms = cg->generate(jvms)) == NULL) {
 474       guarantee(failing(), "call failed to generate:  calls should work");
 475       return;
 476     }
 477   }
 478 
 479   if (cg->is_inline()) {
 480     // Accumulate has_loops estimate
 481     C->set_has_loops(C->has_loops() || call_method->has_loops());
 482     C->env()->notice_inlined_method(call_method);
 483   }
 484 
 485   // Reset parser state from [new_]jvms, which now carries results of the call.
 486   // Return value (if any) is already pushed on the stack by the cg.
 487   add_exception_states_from(new_jvms);
 488   if (new_jvms->map()->control() == top()) {
 489     stop_and_kill_map();
 490   } else {
 491     assert(new_jvms->same_calls_as(jvms), "method/bci left unchanged");
 492     set_jvms(new_jvms);
 493   }
 494 
 495   if (!stopped()) {

+++ new/src/share/vm/opto/doCall.cpp

  44 #ifndef PRODUCT
  45 void trace_type_profile(ciMethod *method, int depth, int bci, ciMethod *prof_method, ciKlass *prof_klass, int site_count, int receiver_count) {
  46   if (TraceTypeProfile || PrintInlining || PrintOptoInlining) {
  47     if (!PrintInlining) {
  48       if (!PrintOpto && !PrintCompilation) {
  49         method->print_short_name();
  50         tty->cr();
  51       }
  52       CompileTask::print_inlining(prof_method, depth, bci);
  53     }
  54     CompileTask::print_inline_indent(depth);
  55     tty->print(" \\-> TypeProfile (%d/%d counts) = ", receiver_count, site_count);
  56     prof_klass->name()->print_symbol();
  57     tty->cr();
  58   }
  59 }
  60 #endif
  61 
  62 CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index, bool call_is_virtual,
  63                                        JVMState* jvms, bool allow_inline,
  64                                        float prof_factor, bool allow_intrinsics) {
  65   ciMethod*       caller   = jvms->method();
  66   int             bci      = jvms->bci();
  67   Bytecodes::Code bytecode = caller->java_code_at_bci(bci);
  68   guarantee(call_method != NULL, "failed method resolution");
  69 
  70   // Dtrace currently doesn't work unless all calls are vanilla
  71   if (env()->dtrace_method_probes()) {
  72     allow_inline = false;
  73   }
  74 
  75   // Note: When we get profiling during stage-1 compiles, we want to pull
  76   // from more specific profile data which pertains to this inlining.
  77   // Right now, ignore the information in jvms->caller(), and do method[bci].
  78   ciCallProfile profile = caller->call_profile_at_bci(bci);
  79 
  80   // See how many times this site has been invoked.
  81   int site_count = profile.count();
  82   int receiver_count = -1;
  83   if (call_is_virtual && UseTypeProfile && profile.has_receiver(0)) {
  84     // Receivers in the profile structure are ordered by call counts


  91     int rid = (receiver_count >= 0)? log->identify(profile.receiver(0)): -1;
  92     int r2id = (rid != -1 && profile.has_receiver(1))? log->identify(profile.receiver(1)):-1;
  93     log->begin_elem("call method='%d' count='%d' prof_factor='%g'",
  94                     log->identify(call_method), site_count, prof_factor);
  95     if (call_is_virtual)  log->print(" virtual='1'");
  96     if (allow_inline)     log->print(" inline='1'");
  97     if (receiver_count >= 0) {
  98       log->print(" receiver='%d' receiver_count='%d'", rid, receiver_count);
  99       if (profile.has_receiver(1)) {
 100         log->print(" receiver2='%d' receiver2_count='%d'", r2id, profile.receiver_count(1));
 101       }
 102     }
 103     log->end_elem();
 104   }
 105 
 106   // Special case the handling of certain common, profitable library
 107   // methods.  If these methods are replaced with specialized code,
 108   // then we return it as the inlined version of the call.
 109   // We do this before the strict f.p. check below because the
 110   // intrinsics handle strict f.p. correctly.
 111   if (allow_inline && allow_intrinsics) {
 112     CallGenerator* cg = find_intrinsic(call_method, call_is_virtual);
 113     if (cg != NULL)  return cg;
 114   }
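       // Note: if the generator chosen here later bails out in generate(),
       // Parse::do_call() retries call_generator() with allow_intrinsics == false,
       // so the site is re-generated as a normal (possibly inlined) Java call.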
 115 
 116   // Do method handle calls.
 117   // NOTE: This must happen before normal inlining logic below since
 118   // MethodHandle.invoke* are native methods which obviously don't
 119   // have bytecodes and so normal inlining fails.
 120   if (call_method->is_method_handle_invoke()) {
 121     if (bytecode != Bytecodes::_invokedynamic) {
 122       GraphKit kit(jvms);
 123       Node* method_handle = kit.argument(0);
 124       return CallGenerator::for_method_handle_call(method_handle, jvms, caller, call_method, profile);
 125     }
 126     else {
 127       return CallGenerator::for_invokedynamic_call(jvms, caller, call_method, profile);
 128     }
 129   }
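       // For MethodHandle.invoke* the MethodHandle being invoked is argument(0),
       // i.e. the receiver of the invokevirtual; an invokedynamic instruction has
       // no receiver on the stack, so it gets its own call generator.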
 130 
 131   // Do not inline strict fp into non-strict code, or the reverse


 438 #endif // not PRODUCT
 439 
 440   assert(jvms == this->jvms(), "still operating on the right JVMS");
 441   assert(jvms_in_sync(),       "jvms must carry full info into CG");
 442 
 443   // save across call, for a subsequent cast_not_null.
 444   Node* receiver = has_receiver ? argument(0) : NULL;
 445 
 446   // Bump method data counters (We profile *before* the call is made
 447   // because exceptions don't return to the call site.)
 448   profile_call(receiver);
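       // receiver is NULL when the call has no receiver (e.g. invokestatic);
       // profiling records a receiver type only when one is present.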
 449 
 450   JVMState* new_jvms;
 451   if ((new_jvms = cg->generate(jvms)) == NULL) {
 452     // When inlining attempt fails (e.g., too many arguments),
 453     // it may contaminate the current compile state, making it
 454     // impossible to pull back and try again.  Once we call
 455     // cg->generate(), we are committed.  If it fails, the whole
 456     // compilation task is compromised.
 457     if (failing())  return;
 458 
 459     // This can happen if a library intrinsic is available, but refuses
 460     // the call site, perhaps because it did not match a pattern the
 461     // intrinsic was expecting to optimize.  It should always be possible
 462     // to get a normal Java call that may inline in that case.
 463     cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms, try_inline, prof_factor(), false);
 464     if ((new_jvms = cg->generate(jvms)) == NULL) {
 465 
 466       // Once again: If it fails, the whole compilation task is compromised.
 467       if (failing())  return;
 468
 469       // Last fallback: generate the call out-of-line.
 470       try_inline = false;
 471       cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms, try_inline, prof_factor());
 472       if ((new_jvms = cg->generate(jvms)) == NULL) {
 473         guarantee(failing(), "call failed to generate:  calls should work");
 474         return;
 475       }
 476     }
 477   }
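       // At this point one of three attempts has produced a call: the original
       // generator (possibly an intrinsic), the retry without intrinsics (which
       // may still inline), or the final out-of-line call.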
 478 
 479   if (cg->is_inline()) {
 480     // Accumulate has_loops estimate
 481     C->set_has_loops(C->has_loops() || call_method->has_loops());
 482     C->env()->notice_inlined_method(call_method);
 483   }
 484 
 485   // Reset parser state from [new_]jvms, which now carries results of the call.
 486   // Return value (if any) is already pushed on the stack by the cg.
 487   add_exception_states_from(new_jvms);
 488   if (new_jvms->map()->control() == top()) {
 489     stop_and_kill_map();
 490   } else {
 491     assert(new_jvms->same_calls_as(jvms), "method/bci left unchanged");
 492     set_jvms(new_jvms);
 493   }
 494 
 495   if (!stopped()) {
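
To summarize the change for readers skimming the listings: Compile::call_generator() gains an allow_intrinsics parameter, and Parse::do_call() now recovers from a generator bailout in two steps, first retrying without intrinsics (so the site may still inline as a normal Java call) and only then forcing an out-of-line call. The following is a minimal, self-contained sketch of that control flow only; it is not HotSpot code, and the function names generate_with_intrinsics(), generate_without_intrinsics() and generate_out_of_line() are hypothetical stand-ins for cg->generate() under the corresponding call_generator() arguments.

#include <cstdio>

// Hypothetical stand-ins for the three generation attempts; each returns
// whether a call was successfully generated for the site.
static bool generate_with_intrinsics()    { return false; }  // e.g. the intrinsic refuses this call site
static bool generate_without_intrinsics() { return false; }  // normal inlining may also bail out
static bool generate_out_of_line()        { return true;  }  // a plain out-of-line call always works

int main() {
  // Stage 1: the original generator, intrinsics allowed.
  bool ok = generate_with_intrinsics();
  if (!ok) {
    // Stage 2: retry with allow_intrinsics == false; the call may still inline.
    ok = generate_without_intrinsics();
  }
  if (!ok) {
    // Stage 3: last fallback, an out-of-line call.
    ok = generate_out_of_line();
  }
  std::printf("call generated: %s\n", ok ? "yes" : "no");
  return ok ? 0 : 1;
}

Each stage runs only if the previous one failed; the guarantee() in the listing asserts that the final stage can fail only when the whole compilation is already failing.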