src/share/vm/opto/callGenerator.cpp

rev 3898 : 8005031: Some cleanup in c2 to prepare for incremental inlining support
Summary: collection of small changes to prepare for incremental inlining.
Reviewed-by:


 257 CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
 258   assert(!m->is_static(), "for_virtual_call mismatch");
 259   assert(!m->is_method_handle_intrinsic(), "should be a direct call");
 260   return new VirtualCallGenerator(m, vtable_index);
 261 }
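for_virtual_call is one of several factory methods the parser picks from once it has decided not to inline a call site. As a point of reference, the choice reduces to something like the sketch below; select_call_generator is a hypothetical helper name, and the many heuristics of Compile::call_generator are elided:

    // Sketch: pick a call generator for a call site that will not be
    // inlined. 'is_virtual' and 'vtable_index' come from call-site
    // resolution in the parser.
    static CallGenerator* select_call_generator(ciMethod* callee,
                                                bool is_virtual,
                                                int vtable_index) {
      if (!is_virtual) {
        // Statically bound target: emit a direct CallStaticJava.
        return CallGenerator::for_direct_call(callee);
      }
      // Dispatch through the vtable slot at vtable_index.
      return CallGenerator::for_virtual_call(callee, vtable_index);
    }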
 262 
 263 // Allow inlining decisions to be delayed
 264 class LateInlineCallGenerator : public DirectCallGenerator {
 265   CallGenerator* _inline_cg;
 266 
 267  public:
 268   LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
 269     DirectCallGenerator(method, true), _inline_cg(inline_cg) {}
 270 
 271   virtual bool      is_late_inline() const { return true; }
 272 
 273   // Convert the CallStaticJava into an inline
 274   virtual void do_late_inline();
 275 
 276   virtual JVMState* generate(JVMState* jvms) {
 277     // Record that this call site should be revisited once the main
 278     // parse is finished.
 279     Compile::current()->add_late_inline(this);
 280 
 281     // Emit the CallStaticJava and request separate projections so
 282     // that the late inlining logic can distinguish between fall
 283     // through and exceptional uses of the memory and io projections
 284     // as is done for allocations and macro expansion.
 285     return DirectCallGenerator::generate(jvms);
 286   }
 287 
 288 };
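A LateInlineCallGenerator carries both halves of the decision: the DirectCallGenerator base emits the placeholder call, and _inline_cg is the generator that can expand the body later. It is produced by a small factory; a sketch of its likely shape, mirroring the other for_* factories in this file:

    // Sketch: wrap an inline-capable generator so the inlining decision
    // can be replayed after the main parse has finished.
    CallGenerator* CallGenerator::for_late_inline(ciMethod* method,
                                                  CallGenerator* inline_cg) {
      return new LateInlineCallGenerator(method, inline_cg);
    }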
 289 
 290 
 291 void LateInlineCallGenerator::do_late_inline() {
 292   // Can't inline it
 293   if (call_node() == NULL || call_node()->outcnt() == 0 ||
 294       call_node()->in(0) == NULL || call_node()->in(0)->is_top())
 295     return;
 296 
 297   CallStaticJavaNode* call = call_node();
 298 
 299   // Make a clone of the JVMState that is appropriate for driving a parse
 300   Compile* C = Compile::current();
 301   JVMState* jvms     = call->jvms()->clone_shallow(C);
 302   uint size = call->req();
 303   SafePointNode* map = new (C) SafePointNode(size, jvms);
 304   for (uint i1 = 0; i1 < size; i1++) {
 305     map->init_req(i1, call->in(i1));
 306   }
 307 
 308   // Make sure the state is a MergeMem for parsing.
 309   if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
 310     map->set_req(TypeFunc::Memory, MergeMemNode::make(C, map->in(TypeFunc::Memory)));
 311   }
 312 
 313   // Make enough space for the expression stack and transfer the incoming arguments
 314   int nargs    = method()->arg_size();
 315   jvms->set_map(map);
 316   map->ensure_stack(jvms, jvms->method()->max_stack());
 317   if (nargs > 0) {
 318     for (int i1 = 0; i1 < nargs; i1++) {
 319       map->set_req(i1 + jvms->argoff(), call->in(TypeFunc::Parms + i1));
 320     }
 321   }
 322 
 323   CompileLog* log = C->log();
 324   if (log != NULL) {
 325     log->head("late_inline method='%d'", log->identify(method()));
 326     JVMState* p = jvms;
 327     while (p != NULL) {
 328       log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
 329       p = p->caller();
 330     }
 331     log->tail("late_inline");
 332   }
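For reference, with -XX:+LogCompilation the head/elem/tail calls above emit an XML fragment of roughly this shape, one <jvms> element per frame of the caller chain (identifiers illustrative):

    <late_inline method='1030'>
      <jvms bci='7' method='1030'/>
      <jvms bci='23' method='1012'/>
    </late_inline>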
 333 
 334   // Set up default node notes to be picked up by the inlining
 335   Node_Notes* old_nn = C->default_node_notes();
 336   if (old_nn != NULL) {
 337     Node_Notes* entry_nn = old_nn->clone(C);
 338     entry_nn->set_jvms(jvms);
 339     C->set_default_node_notes(entry_nn);
 340   }
 341 
 342   // Now perform the inlining using the synthesized JVMState


 591 
 592 CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee) {
 593   GraphKit kit(jvms);
 594   PhaseGVN& gvn = kit.gvn();
 595   Compile* C = kit.C;
 596   vmIntrinsics::ID iid = callee->intrinsic_id();
 597   switch (iid) {
 598   case vmIntrinsics::_invokeBasic:
 599     {
 600       // Get MethodHandle receiver:
 601       Node* receiver = kit.argument(0);
 602       if (receiver->Opcode() == Op_ConP) {
 603         const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
 604         ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
 605         guarantee(!target->is_method_handle_intrinsic(), "should not happen");  // XXX remove
 606         const int vtable_index = Method::invalid_vtable_index;
 607         CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS);
 608         if (cg != NULL && cg->is_inline())
 609           return cg;
 610       } else {
 611         if (PrintInlining)  CompileTask::print_inlining(callee, jvms->depth() - 1, jvms->bci(), "receiver not constant");
 612       }
 613     }
 614     break;
 615 
 616   case vmIntrinsics::_linkToVirtual:
 617   case vmIntrinsics::_linkToStatic:
 618   case vmIntrinsics::_linkToSpecial:
 619   case vmIntrinsics::_linkToInterface:
 620     {
 621       // Get MemberName argument:
 622       Node* member_name = kit.argument(callee->arg_size() - 1);
 623       if (member_name->Opcode() == Op_ConP) {
 624         const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
 625         ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();
 626 
 627         // In lambda forms we erase signature types to avoid resolution issues
 628         // involving class loaders.  When we optimize a method handle invoke
 629         // to a direct call we must cast the receiver and arguments to their
 630         // actual types.
 631         ciSignature* signature = target->signature();
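The excerpt stops inside the switch. Each arm returns an inline CallGenerator only when the MethodHandle or MemberName argument is a compile-time constant; otherwise for_method_handle_inline falls through and returns NULL, and its caller is expected to fall back to a plain call, roughly as sketched here (not the verbatim code):

    // Sketch: try the constant-folding fast path; if the argument is
    // not constant, call the method-handle intrinsic directly instead.
    CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms,
                                                         ciMethod* caller,
                                                         ciMethod* callee) {
      CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee);
      if (cg != NULL)  return cg;
      return CallGenerator::for_direct_call(callee);
    }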




 257 CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
 258   assert(!m->is_static(), "for_virtual_call mismatch");
 259   assert(!m->is_method_handle_intrinsic(), "should be a direct call");
 260   return new VirtualCallGenerator(m, vtable_index);
 261 }
 262 
 263 // Allow inlining decisions to be delayed
 264 class LateInlineCallGenerator : public DirectCallGenerator {
 265   CallGenerator* _inline_cg;
 266 
 267  public:
 268   LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
 269     DirectCallGenerator(method, true), _inline_cg(inline_cg) {}
 270 
 271   virtual bool      is_late_inline() const { return true; }
 272 
 273   // Convert the CallStaticJava into an inline
 274   virtual void do_late_inline();
 275 
 276   virtual JVMState* generate(JVMState* jvms) {
 277     Compile* C = Compile::current();
 278     C->print_inlining_skip(this);
 279 
 280     // Record that this call site should be revisited once the main
 281     // parse is finished.
 282     C->add_late_inline(this);
 283 
 284     // Emit the CallStaticJava and request separate projections so
 285     // that the late inlining logic can distinguish between fall
 286     // through and exceptional uses of the memory and io projections
 287     // as is done for allocations and macro expansion.
 288     return DirectCallGenerator::generate(jvms);
 289   }
 290 };
 291 
 292 
 293 void LateInlineCallGenerator::do_late_inline() {
 294   // Can't inline it
 295   if (call_node() == NULL || call_node()->outcnt() == 0 ||
 296       call_node()->in(0) == NULL || call_node()->in(0)->is_top())
 297     return;
 298 
 299   CallStaticJavaNode* call = call_node();
 300 
 301   // Make a clone of the JVMState that is appropriate for driving a parse
 302   Compile* C = Compile::current();
 303   JVMState* jvms     = call->jvms()->clone_shallow(C);
 304   uint size = call->req();
 305   SafePointNode* map = new (C) SafePointNode(size, jvms);
 306   for (uint i1 = 0; i1 < size; i1++) {
 307     map->init_req(i1, call->in(i1));
 308   }
 309 
 310   // Make sure the state is a MergeMem for parsing.
 311   if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
 312     Node* mem = MergeMemNode::make(C, map->in(TypeFunc::Memory));
 313     C->initial_gvn()->set_type_bottom(mem);
 314     map->set_req(TypeFunc::Memory, mem);
 315   }
 316 
 317   // Make enough space for the expression stack and transfer the incoming arguments
 318   int nargs    = method()->arg_size();
 319   jvms->set_map(map);
 320   map->ensure_stack(jvms, jvms->method()->max_stack());
 321   if (nargs > 0) {
 322     for (int i1 = 0; i1 < nargs; i1++) {
 323       map->set_req(i1 + jvms->argoff(), call->in(TypeFunc::Parms + i1));
 324     }
 325   }
 326 
 327   C->print_inlining_insert(this);
 328 
 329   CompileLog* log = C->log();
 330   if (log != NULL) {
 331     log->head("late_inline method='%d'", log->identify(method()));
 332     JVMState* p = jvms;
 333     while (p != NULL) {
 334       log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
 335       p = p->caller();
 336     }
 337     log->tail("late_inline");
 338   }
 339 
 340   // Set up default node notes to be picked up by the inlining
 341   Node_Notes* old_nn = C->default_node_notes();
 342   if (old_nn != NULL) {
 343     Node_Notes* entry_nn = old_nn->clone(C);
 344     entry_nn->set_jvms(jvms);
 345     C->set_default_node_notes(entry_nn);
 346   }
 347 
 348   // Now perform the inlining using the synthesized JVMState
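The listing stops at the hand-off comment. Schematically, the rest of do_late_inline drives the wrapped generator over the synthesized state and splices the result in place of the call; the sketch below assumes the usual GraphKit conventions (pop/pop_pair for the return value, replace_call for the rewiring) and is not the verbatim continuation:

    // Sketch: run the deferred inline and replace the CallStaticJava.
    JVMState* new_jvms = _inline_cg->generate(jvms);
    if (new_jvms == NULL)  return;   // inlining bailed out; keep the call
    if (C->failing())      return;   // compilation already failed

    GraphKit kit(new_jvms);

    // Pop the return value, if any, off the expression stack.
    Node* result = C->top();
    int   result_size = method()->return_type()->size();
    if (result_size != 0 && !kit.stopped()) {
      result = (result_size == 1) ? kit.pop() : kit.pop_pair();
    }

    // Rewire the call's control, memory, io and result uses to the
    // inlined body.
    kit.replace_call(call, result);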


 597 
 598 CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee) {
 599   GraphKit kit(jvms);
 600   PhaseGVN& gvn = kit.gvn();
 601   Compile* C = kit.C;
 602   vmIntrinsics::ID iid = callee->intrinsic_id();
 603   switch (iid) {
 604   case vmIntrinsics::_invokeBasic:
 605     {
 606       // Get MethodHandle receiver:
 607       Node* receiver = kit.argument(0);
 608       if (receiver->Opcode() == Op_ConP) {
 609         const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
 610         ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
 611         guarantee(!target->is_method_handle_intrinsic(), "should not happen");  // XXX remove
 612         const int vtable_index = Method::invalid_vtable_index;
 613         CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS);
 614         if (cg != NULL && cg->is_inline())
 615           return cg;
 616       } else {
 617         if (PrintInlining)  C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), "receiver not constant");
 618       }
 619     }
 620     break;
 621 
 622   case vmIntrinsics::_linkToVirtual:
 623   case vmIntrinsics::_linkToStatic:
 624   case vmIntrinsics::_linkToSpecial:
 625   case vmIntrinsics::_linkToInterface:
 626     {
 627       // Get MemberName argument:
 628       Node* member_name = kit.argument(callee->arg_size() - 1);
 629       if (member_name->Opcode() == Op_ConP) {
 630         const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
 631         ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();
 632 
 633         // In lambda forms we erase signature types to avoid resolution issues
 634         // involving class loaders.  When we optimize a method handle invoke
 635         // to a direct call we must cast the receiver and arguments to their
 636         // actual types.
 637         ciSignature* signature = target->signature();
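The casting that the comment above describes begins immediately after this point. A sketch of the receiver half of it, assuming the standard CheckCastPP idiom for narrowing a node's type; the per-argument casts follow the same pattern using the types in 'signature':

    // Sketch: narrow the erased receiver to the type the resolved
    // target actually declares, so the direct call type-checks.
    if (!target->is_static()) {
      Node* receiver = kit.argument(0);
      const TypeOopPtr* recv_type = receiver->bottom_type()->isa_oopptr();
      const Type*       sig_type  = TypeOopPtr::make_from_klass(signature->accessing_klass());
      if (recv_type != NULL && !recv_type->higher_equal(sig_type)) {
        Node* cast = gvn.transform(new (C) CheckCastPPNode(kit.control(), receiver, sig_type));
        kit.set_argument(0, cast);
      }
    }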