< prev index next >

src/share/vm/opto/callGenerator.cpp

Print this page
rev 10513 : fix incremental inlining with value types


 355     C->print_inlining_update_delayed(this);
 356   }
 357 
 358   virtual void set_unique_id(jlong id) {
 359     _unique_id = id;
 360   }
 361 
 362   virtual jlong unique_id() const {
 363     return _unique_id;
 364   }
 365 };
 366 
 367 void LateInlineCallGenerator::do_late_inline() {
       // Perform the inlining that was deferred at parse time: rebuild a
       // JVMState/SafePointNode from the reserved CallStaticJava node's inputs,
       // run the stored _inline_cg on it, and splice the inlined subgraph back
       // into the ideal graph in place of the call.
 368   // Can't inline it
       // Bail out if the call node was already removed or sits on dead control.
 369   CallStaticJavaNode* call = call_node();
 370   if (call == NULL || call->outcnt() == 0 ||
 371       call->in(0) == NULL || call->in(0)->is_top()) {
 372     return;
 373   }
 374   
 375   // FIXME: late inlining of methods that take value type arguments is
 376   // broken: arguments at the call are set up so fields of value type
 377   // arguments are passed but code here expects a single argument per
 378   // value type (a ValueTypeNode) instead.
       // Give up if any (non-HALF) argument edge has been replaced by top; per
       // the asserts this is only expected during incremental inlining, where
       // dead paths can degrade the call's inputs.
 379   const TypeTuple *r = call->tf()->domain_sig();
 380   for (int i1 = 0; i1 < method()->arg_size(); i1++) {
 381     if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
 382       assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
 383       return;
 384     }
 385   }
 386 
       // Same bail-out for a dead memory edge.
 387   if (call->in(TypeFunc::Memory)->is_top()) {
 388     assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
 389     return;
 390   }
 391 
 392   Compile* C = Compile::current();
 393   // Remove inlined methods from Compiler's lists.
 394   if (call->is_macro()) {
 395     C->remove_macro_node(call);
 396   }
 397 
 398   // Make a clone of the JVMState that appropriate to use for driving a parse
       // The map initially mirrors every input of the call so the parser starts
       // from the caller's state at the call site.
 399   JVMState* old_jvms = call->jvms();
 400   JVMState* jvms = old_jvms->clone_shallow(C);
 401   uint size = call->req();
 402   SafePointNode* map = new SafePointNode(size, jvms);
 403   for (uint i1 = 0; i1 < size; i1++) {
 404     map->init_req(i1, call->in(i1));
 405   }
 406 

 407   // Make sure the state is a MergeMem for parsing.
 408   if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
 409     Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
 410     C->initial_gvn()->set_type_bottom(mem);
 411     map->set_req(TypeFunc::Memory, mem);
 412   }
 413 
 414   uint nargs = method()->arg_size();
 415   // blow away old call arguments
       // Clear the Parms edges; the arguments are re-installed as parser
       // locals/stack slots via set_argument() below.
 416   Node* top = C->top();
 417   for (uint i1 = 0; i1 < nargs; i1++) {
 418     map->set_req(TypeFunc::Parms + i1, top);
 419   }
 420   jvms->set_map(map);
 421 
 422   // Make enough space in the expression stack to transfer
 423   // the incoming arguments and return value.
 424   map->ensure_stack(jvms, jvms->method()->max_stack());




       // One call edge per argument; this is the assumption the FIXME above
       // says is broken when value type fields are passed as separate edges.
 425   for (uint i1 = 0; i1 < nargs; i1++) {
 426     map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));

















 427   }
 428 
 429   C->print_inlining_assert_ready();
 430 
 431   C->print_inlining_move_to(this);
 432 
 433   C->log_late_inline(this);
 434 
 435   // This check is done here because for_method_handle_inline() method
 436   // needs jvms for inlined state.
       // Subclass hook (e.g. method-handle late inlines); on failure the map is
       // disconnected so it does not keep nodes alive.
 437   if (!do_late_inline_check(jvms)) {
 438     map->disconnect_inputs(NULL, C);
 439     return;
 440   }
 441 
 442   // Setup default node notes to be picked up by the inlining
 443   Node_Notes* old_nn = C->node_notes_at(call->_idx);
 444   if (old_nn != NULL) {
 445     Node_Notes* entry_nn = old_nn->clone(C);
 446     entry_nn->set_jvms(jvms);
       // NOTE(review): line 447 is elided in this diff view; presumably it
       // installs entry_nn as the default node notes — confirm against the
       // full source.

 448   }
 449 
 450   // Now perform the inlining using the synthesized JVMState
 451   JVMState* new_jvms = _inline_cg->generate(jvms);
 452   if (new_jvms == NULL)  return;  // no change
 453   if (C->failing())      return;
 454 
 455   // Capture any exceptional control flow
 456   GraphKit kit(new_jvms);
 457 
 458   // Find the result object
       // Pop the return value (if any) off the inlinee's expression stack;
       // pop_pair for two-slot (long/double) results.
 459   Node* result = C->top();
 460   int   result_size = method()->return_type()->size();
 461   if (result_size != 0 && !kit.stopped()) {
 462     result = (result_size == 1) ? kit.pop() : kit.pop_pair();
 463   }
 464 
 465   C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
 466   C->env()->notice_inlined_method(_inline_cg->method());
 467   C->set_inlining_progress(true);




 468 
       // Replace all uses of the call node with the inlined result graph.
 469   kit.replace_call(call, result, true);
 470 }
 471 
 472 
       // Factory: wrap 'inline_cg' in a LateInlineCallGenerator so that
       // 'method' is inlined after the main parse instead of immediately.
 473 CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
 474   return new LateInlineCallGenerator(method, inline_cg);
 475 }
 476 
 477 class LateInlineMHCallGenerator : public LateInlineCallGenerator {
 478   ciMethod* _caller;
 479   int _attempt;
 480   bool _input_not_const;
 481 
 482   virtual bool do_late_inline_check(JVMState* jvms);
 483   virtual bool already_attempted() const { return _attempt > 0; }
 484 
 485  public:
 486   LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
 487     LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}




 355     C->print_inlining_update_delayed(this);
 356   }
 357 
 358   virtual void set_unique_id(jlong id) {
 359     _unique_id = id;
 360   }
 361 
 362   virtual jlong unique_id() const {
 363     return _unique_id;
 364   }
 365 };
 366 
 367 void LateInlineCallGenerator::do_late_inline() {
       // Perform the inlining that was deferred at parse time: rebuild a
       // JVMState/SafePointNode from the reserved CallStaticJava node's inputs,
       // run the stored _inline_cg on it, and splice the inlined subgraph back
       // into the ideal graph in place of the call. This version (unlike the
       // pre-fix one) handles value type arguments whose fields are passed as
       // separate call edges.
 368   // Can't inline it
       // Bail out if the call node was already removed or sits on dead control.
 369   CallStaticJavaNode* call = call_node();
 370   if (call == NULL || call->outcnt() == 0 ||
 371       call->in(0) == NULL || call->in(0)->is_top()) {
 372     return;
 373   }
 374   
       // domain_cc() describes the calling convention actually used for the
       // call's edges (one edge per value type *field* when fields are passed
       // as args), as opposed to domain_sig() which follows the declared
       // signature — so it matches call->in(...) indexing here.
 375   const TypeTuple *r = call->tf()->domain_cc();




       // Give up if any (non-HALF) argument edge has been replaced by top; per
       // the asserts this is only expected during incremental inlining, where
       // dead paths can degrade the call's inputs.
 376   for (int i1 = 0; i1 < method()->arg_size(); i1++) {
 377     if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
 378       assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
 379       return;
 380     }
 381   }
 382 
       // Same bail-out for a dead memory edge.
 383   if (call->in(TypeFunc::Memory)->is_top()) {
 384     assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
 385     return;
 386   }
 387 
 388   Compile* C = Compile::current();
 389   // Remove inlined methods from Compiler's lists.
 390   if (call->is_macro()) {
 391     C->remove_macro_node(call);
 392   }
 393 
 394   // Make a clone of the JVMState that appropriate to use for driving a parse
       // The map initially mirrors every input of the call so the parser starts
       // from the caller's state at the call site.
 395   JVMState* old_jvms = call->jvms();
 396   JVMState* jvms = old_jvms->clone_shallow(C);
 397   uint size = call->req();
 398   SafePointNode* map = new SafePointNode(size, jvms);
 399   for (uint i1 = 0; i1 < size; i1++) {
 400     map->init_req(i1, call->in(i1));
 401   }
 402 
 403   PhaseGVN& gvn = *C->initial_gvn();
 404   // Make sure the state is a MergeMem for parsing.
 405   if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
 406     Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
 407     gvn.set_type_bottom(mem);
 408     map->set_req(TypeFunc::Memory, mem);
 409   }
 410 

 411   // blow away old call arguments
       // Clear every calling-convention edge (domain_cc()->cnt(), which may
       // exceed arg_size() when value type fields are split); arguments are
       // re-installed as parser locals/stack slots below.
 412   Node* top = C->top();
 413   for (uint i1 = TypeFunc::Parms; i1 < call->_tf->domain_cc()->cnt(); i1++) {
 414     map->set_req(i1, top);
 415   }
 416   jvms->set_map(map);
 417 
 418   // Make enough space in the expression stack to transfer
 419   // the incoming arguments and return value.
 420   map->ensure_stack(jvms, jvms->method()->max_stack());
 421   const TypeTuple *domain_sig = call->_tf->domain_sig();
 422   uint nargs = method()->arg_size();
 423   assert(domain_sig->cnt() - TypeFunc::Parms == nargs, "inconsistent signature");
 424 
       // Re-install the arguments. i1 walks the declared signature; j walks the
       // call's input edges in calling-convention order, so j advances by
       // value_arg_slots() (i.e. faster than i1) when a value type was passed
       // as individual field edges.
 425   uint j = TypeFunc::Parms;
 426   for (uint i1 = 0; i1 < nargs; i1++) {
 427     const Type* t = domain_sig->field_at(TypeFunc::Parms + i1);
 428     if (!ValueTypePassFieldsAsArgs) {
       // One edge per argument; a buffered value type oop is re-wrapped as a
       // ValueTypeNode (presumably loading its fields from memory — confirm
       // ValueTypeNode::make) so the parser sees a single value type argument.
 429       Node* arg = call->in(TypeFunc::Parms + i1);
 430       if (t->isa_valuetypeptr()) {
 431         arg = ValueTypeNode::make(gvn, map->memory(), arg);
 432       }
 433       map->set_argument(jvms, i1, arg);
 434     } else {
       // Fields passed as separate edges: gather the vk->value_arg_slots()
       // edges starting at j back into a single value type node.
 435       if (t->isa_valuetypeptr()) {
 436         ciValueKlass* vk = t->is_valuetypeptr()->value_type()->value_klass();
 437         Node* vt = C->create_vt_node(call, vk, vk, 0, j);
 438         map->set_argument(jvms, i1, gvn.transform(vt));
 439         j += vk->value_arg_slots();
 440       } else {
 441         map->set_argument(jvms, i1, call->in(j));
 442         j++;
 443       }
 444     }
 445   }
 446 
 447   C->print_inlining_assert_ready();
 448 
 449   C->print_inlining_move_to(this);
 450 
 451   C->log_late_inline(this);
 452 
 453   // This check is done here because for_method_handle_inline() method
 454   // needs jvms for inlined state.
       // Subclass hook (e.g. method-handle late inlines); on failure the map is
       // disconnected so it does not keep nodes alive.
 455   if (!do_late_inline_check(jvms)) {
 456     map->disconnect_inputs(NULL, C);
 457     return;
 458   }
 459 
 460   // Setup default node notes to be picked up by the inlining
 461   Node_Notes* old_nn = C->node_notes_at(call->_idx);
 462   if (old_nn != NULL) {
 463     Node_Notes* entry_nn = old_nn->clone(C);
 464     entry_nn->set_jvms(jvms);
       // NOTE(review): line 465 is elided in this diff view; presumably it
       // installs entry_nn as the default node notes — confirm against the
       // full source.

 466   }
 467 
 468   // Now perform the inlining using the synthesized JVMState
 469   JVMState* new_jvms = _inline_cg->generate(jvms);
 470   if (new_jvms == NULL)  return;  // no change
 471   if (C->failing())      return;
 472 
 473   // Capture any exceptional control flow
 474   GraphKit kit(new_jvms);
 475 
 476   // Find the result object
       // Pop the return value (if any) off the inlinee's expression stack;
       // pop_pair for two-slot (long/double) results.
 477   Node* result = C->top();
 478   int   result_size = method()->return_type()->size();
 479   if (result_size != 0 && !kit.stopped()) {
 480     result = (result_size == 1) ? kit.pop() : kit.pop_pair();
 481   }
 482 
 483   C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
 484   C->env()->notice_inlined_method(_inline_cg->method());
 485   C->set_inlining_progress(true);
 486 
       // An inlined body may produce a ValueTypeNode; buffer it to memory so
       // replace_call can substitute an ordinary oop for the call's result.
 487   if (result->is_ValueType()) {
 488     result = result->as_ValueType()->store_to_memory(&kit);
 489   }
 490 
       // Replace all uses of the call node with the inlined result graph.
 491   kit.replace_call(call, result, true);
 492 }
 493 
 494 
       // Factory: wrap 'inline_cg' in a LateInlineCallGenerator so that
       // 'method' is inlined after the main parse instead of immediately.
 495 CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
 496   return new LateInlineCallGenerator(method, inline_cg);
 497 }
 498 
 499 class LateInlineMHCallGenerator : public LateInlineCallGenerator {
 500   ciMethod* _caller;
 501   int _attempt;
 502   bool _input_not_const;
 503 
 504   virtual bool do_late_inline_check(JVMState* jvms);
 505   virtual bool already_attempted() const { return _attempt > 0; }
 506 
 507  public:
 508   LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
 509     LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}


< prev index next >