
src/hotspot/share/opto/callGenerator.cpp

Old version (before the change):

 279   if (InlineTree::check_can_parse(m) != NULL)  return NULL;
 280   float past_uses = m->interpreter_invocation_count();
 281   float expected_uses = past_uses;
 282   return new ParseGenerator(m, expected_uses, true);
 283 }
 284 
 285 CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
 286   assert(!m->is_abstract(), "for_direct_call mismatch");
 287   return new DirectCallGenerator(m, separate_io_proj);
 288 }
 289 
 290 CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
 291   assert(!m->is_static(), "for_virtual_call mismatch");
 292   assert(!m->is_method_handle_intrinsic(), "should be a direct call");
 293   return new VirtualCallGenerator(m, vtable_index);
 294 }
 295 
 296 // Allow inlining decisions to be delayed
 297 class LateInlineCallGenerator : public DirectCallGenerator {
 298  private:
 299   // unique id for log compilation
 300   jlong _unique_id;
 301 
 302  protected:
 303   CallGenerator* _inline_cg;
 304   virtual bool do_late_inline_check(JVMState* jvms) { return true; }
 305 
 306  public:
 307   LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
 308     DirectCallGenerator(method, true), _unique_id(0), _inline_cg(inline_cg) {}
 309 
 310   virtual bool is_late_inline() const { return true; }
 311 
 312   // Convert the CallStaticJava into an inline
 313   virtual void do_late_inline();
 314 
 315   virtual JVMState* generate(JVMState* jvms) {
 316     Compile *C = Compile::current();
 317 
 318     C->log_inline_id(this);
 319 
 320     // Record that this call site should be revisited once the main
 321     // parse is finished.
 322     if (!is_mh_late_inline()) {
 323       C->add_late_inline(this);
 324     }
 325 
 326     // Emit the CallStaticJava and request separate projections so
 327     // that the late inlining logic can distinguish between fall
 328     // through and exceptional uses of the memory and io projections


 372   // check for unreachable loop
 373   CallProjections callprojs;
 374   call->extract_projections(&callprojs, true);
 375   if (callprojs.fallthrough_catchproj == call->in(0) ||
 376       callprojs.catchall_catchproj == call->in(0) ||
 377       callprojs.fallthrough_memproj == call->in(TypeFunc::Memory) ||
 378       callprojs.catchall_memproj == call->in(TypeFunc::Memory) ||
 379       callprojs.fallthrough_ioproj == call->in(TypeFunc::I_O) ||
 380       callprojs.catchall_ioproj == call->in(TypeFunc::I_O) ||
 381       (callprojs.resproj != NULL && call->find_edge(callprojs.resproj) != -1) ||
 382       (callprojs.exobj != NULL && call->find_edge(callprojs.exobj) != -1)) {
 383     return;
 384   }
 385 
 386   Compile* C = Compile::current();
 387   // Remove inlined methods from Compiler's lists.
 388   if (call->is_macro()) {
 389     C->remove_macro_node(call);
 390   }
 391 







 392   // Make a clone of the JVMState that is appropriate for driving a parse
 393   JVMState* old_jvms = call->jvms();
 394   JVMState* jvms = old_jvms->clone_shallow(C);
 395   uint size = call->req();
 396   SafePointNode* map = new SafePointNode(size, jvms);
 397   for (uint i1 = 0; i1 < size; i1++) {
 398     map->init_req(i1, call->in(i1));
 399   }
 400 
 401   // Make sure the state is a MergeMem for parsing.
 402   if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
 403     Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
 404     C->initial_gvn()->set_type_bottom(mem);
 405     map->set_req(TypeFunc::Memory, mem);
 406   }
 407 
 408   uint nargs = method()->arg_size();
 409   // blow away old call arguments
 410   Node* top = C->top();
 411   for (uint i1 = 0; i1 < nargs; i1++) {


 444   // Now perform the inlining using the synthesized JVMState
 445   JVMState* new_jvms = _inline_cg->generate(jvms);
 446   if (new_jvms == NULL)  return;  // no change
 447   if (C->failing())      return;
 448 
 449   // Capture any exceptional control flow
 450   GraphKit kit(new_jvms);
 451 
 452   // Find the result object
 453   Node* result = C->top();
 454   int   result_size = method()->return_type()->size();
 455   if (result_size != 0 && !kit.stopped()) {
 456     result = (result_size == 1) ? kit.pop() : kit.pop_pair();
 457   }
 458 
 459   C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
 460   C->env()->notice_inlined_method(_inline_cg->method());
 461   C->set_inlining_progress(true);
 462   C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup
 463   kit.replace_call(call, result, true);

 464 }
 465 
 466 
 467 CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
 468   return new LateInlineCallGenerator(method, inline_cg);
 469 }
 470 
 471 class LateInlineMHCallGenerator : public LateInlineCallGenerator {
 472   ciMethod* _caller;
 473   int _attempt;
 474   bool _input_not_const;
 475 
 476   virtual bool do_late_inline_check(JVMState* jvms);
 477   virtual bool already_attempted() const { return _attempt > 0; }
 478 
 479  public:
 480   LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
 481     LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}
 482 
 483   virtual bool is_mh_late_inline() const { return true; }


 534 
 535     C->log_inline_id(this);
 536 
 537     C->add_string_late_inline(this);
 538 
 539     JVMState* new_jvms = DirectCallGenerator::generate(jvms);
 540     return new_jvms;
 541   }
 542 
 543   virtual bool is_string_late_inline() const { return true; }
 544 };
 545 
 546 CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
 547   return new LateInlineStringCallGenerator(method, inline_cg);
 548 }
 549 
 550 class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {
 551 
 552  public:
 553   LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
 554     LateInlineCallGenerator(method, inline_cg) {}
 555 
 556   virtual JVMState* generate(JVMState* jvms) {
 557     Compile *C = Compile::current();
 558 
 559     C->log_inline_id(this);
 560 
 561     C->add_boxing_late_inline(this);
 562 
 563     JVMState* new_jvms = DirectCallGenerator::generate(jvms);
 564     return new_jvms;
 565   }
 566 };
 567 
 568 CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
 569   return new LateInlineBoxingCallGenerator(method, inline_cg);
 570 }
 571 
 572 //---------------------------WarmCallGenerator--------------------------------
 573 // Internal class which handles initial deferral of inlining decisions.
 574 class WarmCallGenerator : public CallGenerator {




New version (with the _is_pure_call changes):

 279   if (InlineTree::check_can_parse(m) != NULL)  return NULL;
 280   float past_uses = m->interpreter_invocation_count();
 281   float expected_uses = past_uses;
 282   return new ParseGenerator(m, expected_uses, true);
 283 }
 284 
 285 CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
 286   assert(!m->is_abstract(), "for_direct_call mismatch");
 287   return new DirectCallGenerator(m, separate_io_proj);
 288 }
 289 
 290 CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
 291   assert(!m->is_static(), "for_virtual_call mismatch");
 292   assert(!m->is_method_handle_intrinsic(), "should be a direct call");
 293   return new VirtualCallGenerator(m, vtable_index);
 294 }
 295 
 296 // Allow inlining decisions to be delayed
 297 class LateInlineCallGenerator : public DirectCallGenerator {
 298  private:
 299   jlong _unique_id;   // unique id for log compilation
 300   bool _is_pure_call; // a hint that the call doesn't have important side effects to care about
 301 
 302  protected:
 303   CallGenerator* _inline_cg;
 304   virtual bool do_late_inline_check(JVMState* jvms) { return true; }
 305 
 306  public:
 307   LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg, bool is_pure_call = false) :
 308     DirectCallGenerator(method, true), _unique_id(0), _is_pure_call(is_pure_call), _inline_cg(inline_cg) {}
 309 
 310   virtual bool is_late_inline() const { return true; }
 311 
 312   // Convert the CallStaticJava into an inline
 313   virtual void do_late_inline();
 314 
 315   virtual JVMState* generate(JVMState* jvms) {
 316     Compile *C = Compile::current();
 317 
 318     C->log_inline_id(this);
 319 
 320     // Record that this call site should be revisited once the main
 321     // parse is finished.
 322     if (!is_mh_late_inline()) {
 323       C->add_late_inline(this);
 324     }
 325 
 326     // Emit the CallStaticJava and request separate projections so
 327     // that the late inlining logic can distinguish between fall
 328     // through and exceptional uses of the memory and io projections
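A rough usage sketch of how a call site reaches this generator (an assumption based on the factory methods in this file, not part of the diff; 'callee', 'caller_jvms' and the expected_uses value are placeholders):

    // Hedged sketch: wrap a not-yet-inlinable call site for late inlining.
    CallGenerator* inline_cg = CallGenerator::for_inline(callee, /*expected_uses*/ 1.0f); // assumed factory
    CallGenerator* late_cg   = CallGenerator::for_late_inline(callee, inline_cg);         // defined below at line 475
    JVMState* new_jvms       = late_cg->generate(caller_jvms); // emits the CallStaticJava now and
                                                               // queues late_cg via add_late_inline()

The CallStaticJava emitted here is a normal direct call as far as the rest of the graph is concerned; only the queued generator and Compile's late-inline list remember that it may still be replaced by the callee's body.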


 372   // check for unreachable loop
 373   CallProjections callprojs;
 374   call->extract_projections(&callprojs, true);
 375   if (callprojs.fallthrough_catchproj == call->in(0) ||
 376       callprojs.catchall_catchproj == call->in(0) ||
 377       callprojs.fallthrough_memproj == call->in(TypeFunc::Memory) ||
 378       callprojs.catchall_memproj == call->in(TypeFunc::Memory) ||
 379       callprojs.fallthrough_ioproj == call->in(TypeFunc::I_O) ||
 380       callprojs.catchall_ioproj == call->in(TypeFunc::I_O) ||
 381       (callprojs.resproj != NULL && call->find_edge(callprojs.resproj) != -1) ||
 382       (callprojs.exobj != NULL && call->find_edge(callprojs.exobj) != -1)) {
 383     return;
 384   }
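       // (Note: the bailout above fires when one of the call's own projections,
       //  its control, memory, i/o, result or exception-oop outputs, has been
       //  wired back into the call's inputs. That self-cycle is the "unreachable
       //  loop" the comment refers to, and inlining into such a site is skipped.)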
 385 
 386   Compile* C = Compile::current();
 387   // Remove inlined methods from Compiler's lists.
 388   if (call->is_macro()) {
 389     C->remove_macro_node(call);
 390   }
 391 
 392   bool result_not_used = (callprojs.resproj == NULL || callprojs.resproj->outcnt() == 0);
 393   if (_is_pure_call && result_not_used) {
 394     // The call is marked as pure (no important side effects) and its result isn't used,
 395     // so it is safe to remove the call.
 396     GraphKit kit(call->jvms());
 397     kit.replace_call(call, C->top(), true);
 398   } else {
 399     // Make a clone of the JVMState that is appropriate for driving a parse
 400     JVMState* old_jvms = call->jvms();
 401     JVMState* jvms = old_jvms->clone_shallow(C);
 402     uint size = call->req();
 403     SafePointNode* map = new SafePointNode(size, jvms);
 404     for (uint i1 = 0; i1 < size; i1++) {
 405       map->init_req(i1, call->in(i1));
 406     }
 407 
 408     // Make sure the state is a MergeMem for parsing.
 409     if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
 410       Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
 411       C->initial_gvn()->set_type_bottom(mem);
 412       map->set_req(TypeFunc::Memory, mem);
 413     }
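         // (Note: the parser addresses memory per alias class, so the cloned map
         //  must present its memory state as a MergeMem. A flat memory edge taken
         //  from the call is wrapped here before parsing starts.)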
 414 
 415     uint nargs = method()->arg_size();
 416     // blow away old call arguments
 417     Node* top = C->top();
 418     for (uint i1 = 0; i1 < nargs; i1++) {


 451     // Now perform the inlining using the synthesized JVMState
 452     JVMState* new_jvms = _inline_cg->generate(jvms);
 453     if (new_jvms == NULL)  return;  // no change
 454     if (C->failing())      return;
 455 
 456     // Capture any exceptional control flow
 457     GraphKit kit(new_jvms);
 458 
 459     // Find the result object
 460     Node* result = C->top();
 461     int   result_size = method()->return_type()->size();
 462     if (result_size != 0 && !kit.stopped()) {
 463       result = (result_size == 1) ? kit.pop() : kit.pop_pair();
 464     }
 465 
 466     C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
 467     C->env()->notice_inlined_method(_inline_cg->method());
 468     C->set_inlining_progress(true);
 469     C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup
 470     kit.replace_call(call, result, true);
 471   }
 472 }
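do_late_inline() is not invoked from the generate() methods in this file; generate() only queues the generator (add_late_inline, add_string_late_inline or add_boxing_late_inline). A hedged sketch of the driving side, where everything except do_late_inline() and Compile::current()/failing() is an assumed stand-in for Compile's incremental-inlining loop:

    // Hedged sketch of the (assumed) driver that runs after the main parse.
    while (has_queued_late_inlines()) {              // hypothetical predicate
      CallGenerator* cg = next_queued_late_inline(); // hypothetical accessor
      cg->do_late_inline();  // splice in the parsed callee, or, for a pure call
                             // whose result is unused, just delete the CallStaticJava
      if (Compile::current()->failing())  break;
    }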
 473 
 474 
 475 CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
 476   return new LateInlineCallGenerator(method, inline_cg);
 477 }
 478 
 479 class LateInlineMHCallGenerator : public LateInlineCallGenerator {
 480   ciMethod* _caller;
 481   int _attempt;
 482   bool _input_not_const;
 483 
 484   virtual bool do_late_inline_check(JVMState* jvms);
 485   virtual bool already_attempted() const { return _attempt > 0; }
 486 
 487  public:
 488   LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
 489     LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}
 490 
 491   virtual bool is_mh_late_inline() const { return true; }
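   // (These members support retrying a MethodHandle-based call site: judging by
   //  the names and their use elsewhere, _input_not_const records that the
   //  MethodHandle argument was not a compile-time constant at the first attempt,
   //  and _attempt/already_attempted() bound the number of retries. The retry
   //  decision itself lives in do_late_inline_check(), outside this excerpt.)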


 542 
 543     C->log_inline_id(this);
 544 
 545     C->add_string_late_inline(this);
 546 
 547     JVMState* new_jvms = DirectCallGenerator::generate(jvms);
 548     return new_jvms;
 549   }
 550 
 551   virtual bool is_string_late_inline() const { return true; }
 552 };
 553 
 554 CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
 555   return new LateInlineStringCallGenerator(method, inline_cg);
 556 }
 557 
 558 class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {
 559 
 560  public:
 561   LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
 562     LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}
 563 
 564   virtual JVMState* generate(JVMState* jvms) {
 565     Compile *C = Compile::current();
 566 
 567     C->log_inline_id(this);
 568 
 569     C->add_boxing_late_inline(this);
 570 
 571     JVMState* new_jvms = DirectCallGenerator::generate(jvms);
 572     return new_jvms;
 573   }
 574 };
 575 
 576 CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
 577   return new LateInlineBoxingCallGenerator(method, inline_cg);
 578 }
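The boxing generator is the only client of the new purity flag in this change: its constructor (lines 561-562 above) passes is_pure=true to LateInlineCallGenerator, which stores it as _is_pure_call (line 300) and tests it in do_late_inline() (line 393). A compressed restatement of that decision, with placeholder names for the emitted call's result projection:

    // Hedged restatement of the new fast path for a boxing call site.
    bool result_unused = (resproj == NULL || resproj->outcnt() == 0);
    if (is_pure_call && result_unused) {
      // Nothing observable depends on the boxing call, so it is deleted outright
      // (replace_call with C->top() as the result) instead of being inlined.
    } else {
      // Otherwise the boxing method body is parsed and spliced in as usual.
    }

The rationale is that a boxing method has no side effects the rest of the graph needs to preserve, so keeping, or even inlining, a call whose box is never consumed would only leave dead allocation code behind.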
 579 
 580 //---------------------------WarmCallGenerator--------------------------------
 581 // Internal class which handles initial deferral of inlining decisions.
 582 class WarmCallGenerator : public CallGenerator {

