src/share/vm/opto/callGenerator.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File hotspot Sdiff src/share/vm/opto

src/share/vm/opto/callGenerator.cpp

Print this page
rev 6132 : 8007988: PrintInlining output is inconsistent with incremental inlining
Summary: fix duplicate and conflicting inlining output
Reviewed-by:
rev 6133 : 8005079: fix LogCompilation for incremental inlining
Summary: report late inlining as part of the rest of the inlining output
Reviewed-by:
rev 6134 : imported patch logcompilation-reviews
rev 6135 : [mq]: logcompilation-reviews2


     // Build a parsing generator for an on-stack-replacement (OSR) compile of m.
     // Returns NULL when the method cannot be parsed (check_can_parse reports a
     // failure reason).  Seeds the expected-use count from the interpreter
     // invocation count; the trailing 'true' flags the OSR variant.
     // NOTE(review): osr_bci is not used in this body -- presumably the entry bci
     // is taken from the compilation elsewhere; confirm.
 249 CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
 250   if (InlineTree::check_can_parse(m) != NULL)  return NULL;
 251   float past_uses = m->interpreter_invocation_count();
 252   float expected_uses = past_uses;
 253   return new ParseGenerator(m, expected_uses, true);
 254 }
 255 
     // Build a generator emitting a direct (statically bound) call to m.
     // separate_io_proj requests distinct I/O projections on the call node
     // (used by late inlining to split normal vs. exceptional uses).
 256 CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
 257   assert(!m->is_abstract(), "for_direct_call mismatch");
 258   return new DirectCallGenerator(m, separate_io_proj);
 259 }
 260 
     // Build a generator emitting a virtual dispatch through the given
     // vtable index.  Method-handle intrinsics must go through a direct
     // call instead (second assert).
 261 CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
 262   assert(!m->is_static(), "for_virtual_call mismatch");
 263   assert(!m->is_method_handle_intrinsic(), "should be a direct call");
 264   return new VirtualCallGenerator(m, vtable_index);
 265 }
 266 
 267 // Allow inlining decisions to be delayed
     // Defers the inline decision: generate() emits an ordinary direct call
     // now and registers this site for revisiting; do_late_inline() may later
     // replace that call with the body produced by _inline_cg.
 268 class LateInlineCallGenerator : public DirectCallGenerator {




 269  protected:
       // Generator to run if/when this call site is finally inlined.
 270   CallGenerator* _inline_cg;
 271 
       // Hook for subclasses to veto/refresh the inline decision at
       // late-inline time; the base class always accepts.
 272   virtual bool do_late_inline_check(JVMState* jvms) { return true; }
 273 
 274  public:
 275   LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
 276     DirectCallGenerator(method, true), _inline_cg(inline_cg) {}
 277 
 278   virtual bool is_late_inline() const { return true; }
 279 
 280   // Convert the CallStaticJava into an inline
 281   virtual void do_late_inline();
 282 
 283   virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
 284     Compile *C = Compile::current();
 285 


 286     // Record that this call site should be revisited once the main
 287     // parse is finished.
 288     if (!is_mh_late_inline()) {
 289       C->add_late_inline(this);
 290     }
 291 
 292     // Emit the CallStaticJava and request separate projections so
 293     // that the late inlining logic can distinguish between fall
 294     // through and exceptional uses of the memory and io projections
 295     // as is done for allocations and macro expansion.
 296     return DirectCallGenerator::generate(jvms, parent_parser);
 297   }
 298 
       // Report a late-inlining message at this call's depth/bci and move
       // the pending print-inlining entry to this generator.
 299   virtual void print_inlining_late(const char* msg) {
 300     CallNode* call = call_node();
 301     Compile* C = Compile::current();
 302     C->print_inlining_assert_ready();
 303     C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
 304     C->print_inlining_move_to(this);
 305     C->print_inlining_update_delayed(this);
 306   }








 307 };
 308 
 309 void LateInlineCallGenerator::do_late_inline() {
 310   // Can't inline it
 311   CallStaticJavaNode* call = call_node();
 312   if (call == NULL || call->outcnt() == 0 ||
 313       call->in(0) == NULL || call->in(0)->is_top()) {
 314     return;
 315   }
 316 
 317   const TypeTuple *r = call->tf()->domain();
 318   for (int i1 = 0; i1 < method()->arg_size(); i1++) {
 319     if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
 320       assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
 321       return;
 322     }
 323   }
 324 
 325   if (call->in(TypeFunc::Memory)->is_top()) {
 326     assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");


 351 
 352   uint nargs = method()->arg_size();
 353   // blow away old call arguments
 354   Node* top = C->top();
 355   for (uint i1 = 0; i1 < nargs; i1++) {
 356     map->set_req(TypeFunc::Parms + i1, top);
 357   }
 358   jvms->set_map(map);
 359 
 360   // Make enough space in the expression stack to transfer
 361   // the incoming arguments and return value.
 362   map->ensure_stack(jvms, jvms->method()->max_stack());
 363   for (uint i1 = 0; i1 < nargs; i1++) {
 364     map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
 365   }
 366 
 367   C->print_inlining_assert_ready();
 368 
 369   C->print_inlining_move_to(this);
 370 


 371   // This check is done here because for_method_handle_inline() method
 372   // needs jvms for inlined state.
 373   if (!do_late_inline_check(jvms)) {
 374     map->disconnect_inputs(NULL, C);
 375     return;
 376   }
 377 
 378   CompileLog* log = C->log();
 379   if (log != NULL) {
 380     log->head("late_inline method='%d'", log->identify(method()));
 381     JVMState* p = jvms;
 382     while (p != NULL) {
 383       log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
 384       p = p->caller();
 385     }
 386     log->tail("late_inline");
 387   }
 388 
 389   // Setup default node notes to be picked up by the inlining
 390   Node_Notes* old_nn = C->default_node_notes();
 391   if (old_nn != NULL) {
 392     Node_Notes* entry_nn = old_nn->clone(C);
 393     entry_nn->set_jvms(jvms);
 394     C->set_default_node_notes(entry_nn);
 395   }
 396 
 397   // Now perform the inlining using the synthesized JVMState
 398   JVMState* new_jvms = _inline_cg->generate(jvms, NULL);
 399   if (new_jvms == NULL)  return;  // no change
 400   if (C->failing())      return;
 401 
 402   // Capture any exceptional control flow
 403   GraphKit kit(new_jvms);
 404 
 405   // Find the result object
 406   Node* result = C->top();
 407   int   result_size = method()->return_type()->size();
 408   if (result_size != 0 && !kit.stopped()) {


 421   return new LateInlineCallGenerator(method, inline_cg);
 422 }
 423 
     // Late inliner for method-handle invokes: the concrete target is only
     // resolved at late-inline time (see do_late_inline_check), so _inline_cg
     // starts out NULL.
 424 class LateInlineMHCallGenerator : public LateInlineCallGenerator {
 425   ciMethod* _caller;
 426   int _attempt;
 427   bool _input_not_const;
 428 
 429   virtual bool do_late_inline_check(JVMState* jvms);
 430   virtual bool already_attempted() const { return _attempt > 0; }
 431 
 432  public:
 433   LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
 434     LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}
 435 
 436   virtual bool is_mh_late_inline() const { return true; }
 437 
 438   virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
 439     JVMState* new_jvms = LateInlineCallGenerator::generate(jvms, parent_parser);
 440 

 441     if (_input_not_const) {
 442       // inlining won't be possible so no need to enqueue right now.
 443       call_node()->set_generator(this);
 444     } else {
 445       Compile::current()->add_late_inline(this);
 446     }
 447     return new_jvms;
 448   }
 449 };
 450 
     // Try to resolve the method-handle call to a concrete inline generator.
     // On success: install it as _inline_cg, drop the outstanding MH
     // late-inline count, and allow do_late_inline() to proceed.
     // On failure: re-attach this generator to the call node so the site can
     // be retried, and return false.
 451 bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {
 452 
 453   CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);
 454 
 455   Compile::current()->print_inlining_update_delayed(this);
 456 
       // Only count attempts when the input was constant; a non-constant
       // input cannot succeed, so it is not an "attempt".
 457   if (!_input_not_const) {
 458     _attempt++;
 459   }
 460 
 461   if (cg != NULL) {
 462     assert(!cg->is_late_inline() && cg->is_inline(), "we're doing late inlining");
 463     _inline_cg = cg;
 464     Compile::current()->dec_number_of_mh_late_inlines();
 465     return true;
 466   }
 467 
 468   call_node()->set_generator(this);
 469   return false;
 470 }
 471 
     // Build a late-inline generator for a method-handle call site and bump
     // the compile-wide MH late-inline count (decremented again in
     // do_late_inline_check when the site is resolved).
 472 CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
 473   Compile::current()->inc_number_of_mh_late_inlines();
 474   CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
 475   return cg;
 476 }
 477 
     // Late inliner for string-concatenation candidates: enqueued on the
     // compile's dedicated string late-inline list rather than the generic one.
 478 class LateInlineStringCallGenerator : public LateInlineCallGenerator {
 479 
 480  public:
 481   LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
 482     LateInlineCallGenerator(method, inline_cg) {}
 483 
 484   virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
 485     Compile *C = Compile::current();



 486     C->add_string_late_inline(this);
 487 
 488     JVMState* new_jvms =  DirectCallGenerator::generate(jvms, parent_parser);
 489     return new_jvms;
 490   }
 491 
 492   virtual bool is_string_late_inline() const { return true; }
 493 };
 494 
     // Factory for the string-concat late-inline generator above.
 495 CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
 496   return new LateInlineStringCallGenerator(method, inline_cg);
 497 }
 498 
     // Late inliner for autoboxing candidates: enqueued on the compile's
     // dedicated boxing late-inline list.
 499 class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {
 500 
 501  public:
 502   LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
 503     LateInlineCallGenerator(method, inline_cg) {}
 504 
 505   virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
 506     Compile *C = Compile::current();
 507 


 508     C->add_boxing_late_inline(this);
 509 
 510     JVMState* new_jvms =  DirectCallGenerator::generate(jvms, parent_parser);
 511     return new_jvms;
 512   }
 513 };
 514 
     // Factory for the boxing late-inline generator above.
 515 CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
 516   return new LateInlineBoxingCallGenerator(method, inline_cg);
 517 }
 518 
 519 //---------------------------WarmCallGenerator--------------------------------
 520 // Internal class which handles initial deferral of inlining decisions.
 521 class WarmCallGenerator : public CallGenerator {
 522   WarmCallInfo*   _call_info;
 523   CallGenerator*  _if_cold;
 524   CallGenerator*  _if_hot;
 525   bool            _is_virtual;   // caches virtuality of if_cold
 526   bool            _is_inline;    // caches inline-ness of if_hot
 527 


 769   vmIntrinsics::ID iid = callee->intrinsic_id();
 770   input_not_const = true;
 771   switch (iid) {
 772   case vmIntrinsics::_invokeBasic:
 773     {
 774       // Get MethodHandle receiver:
 775       Node* receiver = kit.argument(0);
 776       if (receiver->Opcode() == Op_ConP) {
 777         input_not_const = false;
 778         const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
 779         ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
 780         guarantee(!target->is_method_handle_intrinsic(), "should not happen");  // XXX remove
 781         const int vtable_index = Method::invalid_vtable_index;
 782         CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS, NULL, true, true);
 783         assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
 784         if (cg != NULL && cg->is_inline())
 785           return cg;
 786       } else {
 787         const char* msg = "receiver not constant";
 788         if (PrintInlining)  C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);

 789       }
 790     }
 791     break;
 792 
 793   case vmIntrinsics::_linkToVirtual:
 794   case vmIntrinsics::_linkToStatic:
 795   case vmIntrinsics::_linkToSpecial:
 796   case vmIntrinsics::_linkToInterface:
 797     {
 798       // Get MemberName argument:
 799       Node* member_name = kit.argument(callee->arg_size() - 1);
 800       if (member_name->Opcode() == Op_ConP) {
 801         input_not_const = false;
 802         const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
 803         ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();
 804 
 805         // In lambda forms we erase signature types to avoid resolving issues
 806         // involving class loaders.  When we optimize a method handle invoke
 807         // to a direct call we must cast the receiver and arguments to their
 808         // actual types.


 841         ciKlass* speculative_receiver_type = NULL;
 842         if (is_virtual_or_interface) {
 843           ciInstanceKlass* klass = target->holder();
 844           Node*             receiver_node = kit.argument(0);
 845           const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
 846           // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
 847           target = C->optimize_virtual_call(caller, jvms->bci(), klass, target, receiver_type,
 848                                             is_virtual,
 849                                             call_does_dispatch, vtable_index);  // out-parameters
 850           // We lack profiling at this call but type speculation may
 851           // provide us with a type
 852           speculative_receiver_type = receiver_type->speculative_type();
 853         }
 854         CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, true, PROB_ALWAYS, speculative_receiver_type, true, true);
 855         assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
 856         if (cg != NULL && cg->is_inline())
 857           return cg;
 858       } else {
 859         const char* msg = "member_name not constant";
 860         if (PrintInlining)  C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);

 861       }
 862     }
 863     break;
 864 
 865   default:
 866     fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
 867     break;
 868   }
 869   return NULL;
 870 }
 871 
 872 
 873 //------------------------PredictedIntrinsicGenerator------------------------------
 874 // Internal class which handles all predicted Intrinsic calls.
 875 class PredictedIntrinsicGenerator : public CallGenerator {
 876   CallGenerator* _intrinsic;
 877   CallGenerator* _cg;
 878 
 879 public:
 880   PredictedIntrinsicGenerator(CallGenerator* intrinsic,




     // (new rev) Build a parsing generator for an on-stack-replacement (OSR)
     // compile of m; NULL when the method cannot be parsed.
     // NOTE(review): osr_bci is not used in this body -- presumably the entry
     // bci is taken from the compilation elsewhere; confirm.
 249 CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
 250   if (InlineTree::check_can_parse(m) != NULL)  return NULL;
 251   float past_uses = m->interpreter_invocation_count();
 252   float expected_uses = past_uses;
 253   return new ParseGenerator(m, expected_uses, true);
 254 }
 255 
     // (new rev) Build a generator emitting a direct (statically bound) call
     // to m; separate_io_proj requests distinct I/O projections on the call.
 256 CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
 257   assert(!m->is_abstract(), "for_direct_call mismatch");
 258   return new DirectCallGenerator(m, separate_io_proj);
 259 }
 260 
     // (new rev) Build a generator emitting a virtual dispatch through the
     // given vtable index; MH intrinsics must use a direct call instead.
 261 CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
 262   assert(!m->is_static(), "for_virtual_call mismatch");
 263   assert(!m->is_method_handle_intrinsic(), "should be a direct call");
 264   return new VirtualCallGenerator(m, vtable_index);
 265 }
 266 
 267 // Allow inlining decisions to be delayed
     // (new rev) Same deferred-inlining scheme as before, now carrying a
     // _unique_id so LogCompilation can correlate the deferred call with its
     // eventual inlining record (C->log_inline_id in generate()).
 268 class LateInlineCallGenerator : public DirectCallGenerator {
 269  private:
 270   // unique id for log compilation
 271   jlong _unique_id;
 272 
 273  protected:
       // Generator to run if/when this call site is finally inlined.
 274   CallGenerator* _inline_cg;

       // Hook for subclasses to veto/refresh the inline decision at
       // late-inline time; the base class always accepts.
 275   virtual bool do_late_inline_check(JVMState* jvms) { return true; }
 276 
 277  public:
 278   LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
 279     DirectCallGenerator(method, true), _inline_cg(inline_cg), _unique_id(0) {}
 280 
 281   virtual bool is_late_inline() const { return true; }
 282 
 283   // Convert the CallStaticJava into an inline
 284   virtual void do_late_inline();
 285 
 286   virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
 287     Compile *C = Compile::current();
 288 
 289     C->log_inline_id(this);
 290 
 291     // Record that this call site should be revisited once the main
 292     // parse is finished.
 293     if (!is_mh_late_inline()) {
 294       C->add_late_inline(this);
 295     }
 296 
 297     // Emit the CallStaticJava and request separate projections so
 298     // that the late inlining logic can distinguish between fall
 299     // through and exceptional uses of the memory and io projections
 300     // as is done for allocations and macro expansion.
 301     return DirectCallGenerator::generate(jvms, parent_parser);
 302   }
 303 
       // Report a late-inlining message at this call's depth/bci and move
       // the pending print-inlining entry to this generator.
 304   virtual void print_inlining_late(const char* msg) {
 305     CallNode* call = call_node();
 306     Compile* C = Compile::current();
 307     C->print_inlining_assert_ready();
 308     C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
 309     C->print_inlining_move_to(this);
 310     C->print_inlining_update_delayed(this);
 311   }
 312 
 313   virtual void set_unique_id(jlong id) {
 314     _unique_id = id;
 315   }
 316 
 317   virtual jlong unique_id() const {
 318     return _unique_id;
 319   }
 320 };
 321 
 322 void LateInlineCallGenerator::do_late_inline() {
 323   // Can't inline it
 324   CallStaticJavaNode* call = call_node();
 325   if (call == NULL || call->outcnt() == 0 ||
 326       call->in(0) == NULL || call->in(0)->is_top()) {
 327     return;
 328   }
 329 
 330   const TypeTuple *r = call->tf()->domain();
 331   for (int i1 = 0; i1 < method()->arg_size(); i1++) {
 332     if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
 333       assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
 334       return;
 335     }
 336   }
 337 
 338   if (call->in(TypeFunc::Memory)->is_top()) {
 339     assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");


 364 
 365   uint nargs = method()->arg_size();
 366   // blow away old call arguments
 367   Node* top = C->top();
 368   for (uint i1 = 0; i1 < nargs; i1++) {
 369     map->set_req(TypeFunc::Parms + i1, top);
 370   }
 371   jvms->set_map(map);
 372 
 373   // Make enough space in the expression stack to transfer
 374   // the incoming arguments and return value.
 375   map->ensure_stack(jvms, jvms->method()->max_stack());
 376   for (uint i1 = 0; i1 < nargs; i1++) {
 377     map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
 378   }
 379 
 380   C->print_inlining_assert_ready();
 381 
 382   C->print_inlining_move_to(this);
 383 
 384   C->log_late_inline(this);
 385 
 386   // This check is done here because for_method_handle_inline() method
 387   // needs jvms for inlined state.
 388   if (!do_late_inline_check(jvms)) {
 389     map->disconnect_inputs(NULL, C);
 390     return;
 391   }
 392 











 393   // Setup default node notes to be picked up by the inlining
 394   Node_Notes* old_nn = C->default_node_notes();
 395   if (old_nn != NULL) {
 396     Node_Notes* entry_nn = old_nn->clone(C);
 397     entry_nn->set_jvms(jvms);
 398     C->set_default_node_notes(entry_nn);
 399   }
 400 
 401   // Now perform the inlining using the synthesized JVMState
 402   JVMState* new_jvms = _inline_cg->generate(jvms, NULL);
 403   if (new_jvms == NULL)  return;  // no change
 404   if (C->failing())      return;
 405 
 406   // Capture any exceptional control flow
 407   GraphKit kit(new_jvms);
 408 
 409   // Find the result object
 410   Node* result = C->top();
 411   int   result_size = method()->return_type()->size();
 412   if (result_size != 0 && !kit.stopped()) {


 425   return new LateInlineCallGenerator(method, inline_cg);
 426 }
 427 
     // (new rev) Late inliner for method-handle invokes; the concrete target
     // is resolved only in do_late_inline_check, so _inline_cg starts NULL.
 428 class LateInlineMHCallGenerator : public LateInlineCallGenerator {
 429   ciMethod* _caller;
 430   int _attempt;
 431   bool _input_not_const;
 432 
 433   virtual bool do_late_inline_check(JVMState* jvms);
 434   virtual bool already_attempted() const { return _attempt > 0; }
 435 
 436  public:
 437   LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
 438     LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}
 439 
 440   virtual bool is_mh_late_inline() const { return true; }
 441 
 442   virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
 443     JVMState* new_jvms = LateInlineCallGenerator::generate(jvms, parent_parser);
 444 
 445     Compile* C = Compile::current();
 446     if (_input_not_const) {
 447       // inlining won't be possible so no need to enqueue right now.
 448       call_node()->set_generator(this);
 449     } else {
 450       C->add_late_inline(this);
 451     }
 452     return new_jvms;
 453   }
 454 };
 455 
     // (new rev) Resolve the method-handle call to a concrete inline
     // generator.  Success installs it as _inline_cg and drops the
     // outstanding MH late-inline count; failure re-attaches this generator
     // to the call node for a later retry and returns false.
 456 bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {
 457 
 458   CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);
 459 
 460   Compile::current()->print_inlining_update_delayed(this);
 461 
       // Only count attempts when the input was constant; a non-constant
       // input cannot succeed, so it is not an "attempt".
 462   if (!_input_not_const) {
 463     _attempt++;
 464   }
 465 
 466   if (cg != NULL) {
 467     assert(!cg->is_late_inline() && cg->is_inline(), "we're doing late inlining");
 468     _inline_cg = cg;
 469     Compile::current()->dec_number_of_mh_late_inlines();
 470     return true;
 471   }
 472 
 473   call_node()->set_generator(this);
 474   return false;
 475 }
 476 
     // (new rev) Build a late-inline generator for a method-handle call site
     // and bump the compile-wide MH late-inline count (decremented again in
     // do_late_inline_check when the site is resolved).
 477 CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
 478   Compile::current()->inc_number_of_mh_late_inlines();
 479   CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
 480   return cg;
 481 }
 482 
     // (new rev) Late inliner for string-concatenation candidates; also logs
     // an inline id so LogCompilation can match this deferred site later.
 483 class LateInlineStringCallGenerator : public LateInlineCallGenerator {
 484 
 485  public:
 486   LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
 487     LateInlineCallGenerator(method, inline_cg) {}
 488 
 489   virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
 490     Compile *C = Compile::current();
 491 
 492     C->log_inline_id(this);
 493 
 494     C->add_string_late_inline(this);
 495 
 496     JVMState* new_jvms =  DirectCallGenerator::generate(jvms, parent_parser);
 497     return new_jvms;
 498   }
 499 
 500   virtual bool is_string_late_inline() const { return true; }
 501 };
 502 
     // (new rev) Factory for the string-concat late-inline generator above.
 503 CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
 504   return new LateInlineStringCallGenerator(method, inline_cg);
 505 }
 506 
     // (new rev) Late inliner for autoboxing candidates; also logs an inline
     // id so LogCompilation can match this deferred site later.
 507 class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {
 508 
 509  public:
 510   LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
 511     LateInlineCallGenerator(method, inline_cg) {}
 512 
 513   virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
 514     Compile *C = Compile::current();
 515 
 516     C->log_inline_id(this);
 517 
 518     C->add_boxing_late_inline(this);
 519 
 520     JVMState* new_jvms =  DirectCallGenerator::generate(jvms, parent_parser);
 521     return new_jvms;
 522   }
 523 };
 524 
     // (new rev) Factory for the boxing late-inline generator above.
 525 CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
 526   return new LateInlineBoxingCallGenerator(method, inline_cg);
 527 }
 528 
 529 //---------------------------WarmCallGenerator--------------------------------
 530 // Internal class which handles initial deferral of inlining decisions.
 531 class WarmCallGenerator : public CallGenerator {
 532   WarmCallInfo*   _call_info;
 533   CallGenerator*  _if_cold;
 534   CallGenerator*  _if_hot;
 535   bool            _is_virtual;   // caches virtuality of if_cold
 536   bool            _is_inline;    // caches inline-ness of if_hot
 537 


 779   vmIntrinsics::ID iid = callee->intrinsic_id();
 780   input_not_const = true;
 781   switch (iid) {
 782   case vmIntrinsics::_invokeBasic:
 783     {
 784       // Get MethodHandle receiver:
 785       Node* receiver = kit.argument(0);
 786       if (receiver->Opcode() == Op_ConP) {
 787         input_not_const = false;
 788         const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
 789         ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
 790         guarantee(!target->is_method_handle_intrinsic(), "should not happen");  // XXX remove
 791         const int vtable_index = Method::invalid_vtable_index;
 792         CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS, NULL, true, true);
 793         assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
 794         if (cg != NULL && cg->is_inline())
 795           return cg;
 796       } else {
 797         const char* msg = "receiver not constant";
 798         if (PrintInlining)  C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
 799         C->log_inline_failure(msg);
 800       }
 801     }
 802     break;
 803 
 804   case vmIntrinsics::_linkToVirtual:
 805   case vmIntrinsics::_linkToStatic:
 806   case vmIntrinsics::_linkToSpecial:
 807   case vmIntrinsics::_linkToInterface:
 808     {
 809       // Get MemberName argument:
 810       Node* member_name = kit.argument(callee->arg_size() - 1);
 811       if (member_name->Opcode() == Op_ConP) {
 812         input_not_const = false;
 813         const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
 814         ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();
 815 
 816         // In lambda forms we erase signature types to avoid resolving issues
 817         // involving class loaders.  When we optimize a method handle invoke
 818         // to a direct call we must cast the receiver and arguments to their
 819         // actual types.


 852         ciKlass* speculative_receiver_type = NULL;
 853         if (is_virtual_or_interface) {
 854           ciInstanceKlass* klass = target->holder();
 855           Node*             receiver_node = kit.argument(0);
 856           const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
 857           // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
 858           target = C->optimize_virtual_call(caller, jvms->bci(), klass, target, receiver_type,
 859                                             is_virtual,
 860                                             call_does_dispatch, vtable_index);  // out-parameters
 861           // We lack profiling at this call but type speculation may
 862           // provide us with a type
 863           speculative_receiver_type = receiver_type->speculative_type();
 864         }
 865         CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, true, PROB_ALWAYS, speculative_receiver_type, true, true);
 866         assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
 867         if (cg != NULL && cg->is_inline())
 868           return cg;
 869       } else {
 870         const char* msg = "member_name not constant";
 871         if (PrintInlining)  C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
 872         C->log_inline_failure(msg);
 873       }
 874     }
 875     break;
 876 
 877   default:
 878     fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
 879     break;
 880   }
 881   return NULL;
 882 }
 883 
 884 
 885 //------------------------PredictedIntrinsicGenerator------------------------------
 886 // Internal class which handles all predicted Intrinsic calls.
 887 class PredictedIntrinsicGenerator : public CallGenerator {
 888   CallGenerator* _intrinsic;
 889   CallGenerator* _cg;
 890 
 891 public:
 892   PredictedIntrinsicGenerator(CallGenerator* intrinsic,


src/share/vm/opto/callGenerator.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File