378
379   // Now perform the inlining using the synthesized JVMState
380 JVMState* new_jvms = _inline_cg->generate(jvms);
381 if (new_jvms == NULL) return; // no change
382 if (C->failing()) return;
383
384 // Capture any exceptional control flow
385 GraphKit kit(new_jvms);
386
387 // Find the result object
388 Node* result = C->top();
389 int result_size = method()->return_type()->size();
390 if (result_size != 0 && !kit.stopped()) {
391 result = (result_size == 1) ? kit.pop() : kit.pop_pair();
392 }
393
394 C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
395 C->env()->notice_inlined_method(_inline_cg->method());
396 C->set_inlining_progress(true);
397
398 kit.replace_call(call, result);
399 }
400
401
// Factory for late inlining: wraps 'inline_cg' in a LateInlineCallGenerator
// so that the decision/work of inlining 'method' is deferred (performed later
// via do_late_inline rather than during the initial parse of the caller).
402 CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
403   return new LateInlineCallGenerator(method, inline_cg);
404 }
405
406 class LateInlineMHCallGenerator : public LateInlineCallGenerator {
407 ciMethod* _caller;
408 int _attempt;
409 bool _input_not_const;
410
411 virtual bool do_late_inline_check(JVMState* jvms);
412 virtual bool already_attempted() const { return _attempt > 0; }
413
414 public:
415 LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
416 LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}
417
418 virtual bool is_mh_late_inline() const { return true; }
597
598
599 JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
600 GraphKit kit(jvms);
601 PhaseGVN& gvn = kit.gvn();
602 // We need an explicit receiver null_check before checking its type.
603 // We share a map with the caller, so his JVMS gets adjusted.
604 Node* receiver = kit.argument(0);
605
606 CompileLog* log = kit.C->log();
607 if (log != NULL) {
608 log->elem("predicted_call bci='%d' klass='%d'",
609 jvms->bci(), log->identify(_predicted_receiver));
610 }
611
612 receiver = kit.null_check_receiver_before_call(method());
613 if (kit.stopped()) {
614 return kit.transfer_exceptions_into_jvms();
615 }
616
617 Node* exact_receiver = receiver; // will get updated in place...
618 Node* slow_ctl = kit.type_check_receiver(receiver,
619 _predicted_receiver, _hit_prob,
620 &exact_receiver);
621
622 SafePointNode* slow_map = NULL;
623 JVMState* slow_jvms;
624 { PreserveJVMState pjvms(&kit);
625 kit.set_control(slow_ctl);
626 if (!kit.stopped()) {
627 slow_jvms = _if_missed->generate(kit.sync_jvms());
628 if (kit.failing())
629 return NULL; // might happen because of NodeCountInliningCutoff
630 assert(slow_jvms != NULL, "must be");
631 kit.add_exception_states_from(slow_jvms);
632 kit.set_map(slow_jvms->map());
633 if (!kit.stopped())
634 slow_map = kit.stop();
635 }
636 }
649 if (new_jvms == NULL) {
650 // Inline failed, so make a direct call.
651 assert(_if_hit->is_inline(), "must have been a failed inline");
652 CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
653 new_jvms = cg->generate(kit.sync_jvms());
654 }
655 kit.add_exception_states_from(new_jvms);
656 kit.set_jvms(new_jvms);
657
658 // Need to merge slow and fast?
659 if (slow_map == NULL) {
660 // The fast path is the only path remaining.
661 return kit.transfer_exceptions_into_jvms();
662 }
663
664 if (kit.stopped()) {
665 // Inlined method threw an exception, so it's just the slow path after all.
666 kit.set_jvms(slow_jvms);
667 return kit.transfer_exceptions_into_jvms();
668 }
669
670 // Finish the diamond.
671 kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
672 RegionNode* region = new (kit.C) RegionNode(3);
673 region->init_req(1, kit.control());
674 region->init_req(2, slow_map->control());
675 kit.set_control(gvn.transform(region));
676 Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
677 iophi->set_req(2, slow_map->i_o());
678 kit.set_i_o(gvn.transform(iophi));
679 kit.merge_memory(slow_map->merged_memory(), region, 2);
680 uint tos = kit.jvms()->stkoff() + kit.sp();
681 uint limit = slow_map->req();
682 for (uint i = TypeFunc::Parms; i < limit; i++) {
683 // Skip unused stack slots; fast forward to monoff();
684 if (i == tos) {
685 i = kit.jvms()->monoff();
686 if( i >= limit ) break;
687 }
688 Node* m = kit.map()->in(i);
|
378
379   // Now perform the inlining using the synthesized JVMState
380 JVMState* new_jvms = _inline_cg->generate(jvms);
381 if (new_jvms == NULL) return; // no change
382 if (C->failing()) return;
383
384 // Capture any exceptional control flow
385 GraphKit kit(new_jvms);
386
387 // Find the result object
388 Node* result = C->top();
389 int result_size = method()->return_type()->size();
390 if (result_size != 0 && !kit.stopped()) {
391 result = (result_size == 1) ? kit.pop() : kit.pop_pair();
392 }
393
394 C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
395 C->env()->notice_inlined_method(_inline_cg->method());
396 C->set_inlining_progress(true);
397
398 kit.replace_call(call, result, true);
399 }
400
401
// Factory for late inlining: wraps 'inline_cg' in a LateInlineCallGenerator
// so that the decision/work of inlining 'method' is deferred (performed later
// via do_late_inline rather than during the initial parse of the caller).
402 CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
403   return new LateInlineCallGenerator(method, inline_cg);
404 }
405
406 class LateInlineMHCallGenerator : public LateInlineCallGenerator {
407 ciMethod* _caller;
408 int _attempt;
409 bool _input_not_const;
410
411 virtual bool do_late_inline_check(JVMState* jvms);
412 virtual bool already_attempted() const { return _attempt > 0; }
413
414 public:
415 LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
416 LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}
417
418 virtual bool is_mh_late_inline() const { return true; }
597
598
599 JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
600 GraphKit kit(jvms);
601 PhaseGVN& gvn = kit.gvn();
602 // We need an explicit receiver null_check before checking its type.
603 // We share a map with the caller, so his JVMS gets adjusted.
604 Node* receiver = kit.argument(0);
605
606 CompileLog* log = kit.C->log();
607 if (log != NULL) {
608 log->elem("predicted_call bci='%d' klass='%d'",
609 jvms->bci(), log->identify(_predicted_receiver));
610 }
611
612 receiver = kit.null_check_receiver_before_call(method());
613 if (kit.stopped()) {
614 return kit.transfer_exceptions_into_jvms();
615 }
616
617 // Make a copy of the replaced nodes in case we need to restore them
618 ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
619 replaced_nodes.clone();
620
621 Node* exact_receiver = receiver; // will get updated in place...
622 Node* slow_ctl = kit.type_check_receiver(receiver,
623 _predicted_receiver, _hit_prob,
624 &exact_receiver);
625
626 SafePointNode* slow_map = NULL;
627 JVMState* slow_jvms;
628 { PreserveJVMState pjvms(&kit);
629 kit.set_control(slow_ctl);
630 if (!kit.stopped()) {
631 slow_jvms = _if_missed->generate(kit.sync_jvms());
632 if (kit.failing())
633 return NULL; // might happen because of NodeCountInliningCutoff
634 assert(slow_jvms != NULL, "must be");
635 kit.add_exception_states_from(slow_jvms);
636 kit.set_map(slow_jvms->map());
637 if (!kit.stopped())
638 slow_map = kit.stop();
639 }
640 }
653 if (new_jvms == NULL) {
654 // Inline failed, so make a direct call.
655 assert(_if_hit->is_inline(), "must have been a failed inline");
656 CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
657 new_jvms = cg->generate(kit.sync_jvms());
658 }
659 kit.add_exception_states_from(new_jvms);
660 kit.set_jvms(new_jvms);
661
662 // Need to merge slow and fast?
663 if (slow_map == NULL) {
664 // The fast path is the only path remaining.
665 return kit.transfer_exceptions_into_jvms();
666 }
667
668 if (kit.stopped()) {
669 // Inlined method threw an exception, so it's just the slow path after all.
670 kit.set_jvms(slow_jvms);
671 return kit.transfer_exceptions_into_jvms();
672 }
673
674 // There are 2 branches and the replaced nodes are only valid on
675 // one: restore the replaced nodes to what they were before the
676 // branch.
677 kit.map()->set_replaced_nodes(replaced_nodes);
678
679 // Finish the diamond.
680 kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
681 RegionNode* region = new (kit.C) RegionNode(3);
682 region->init_req(1, kit.control());
683 region->init_req(2, slow_map->control());
684 kit.set_control(gvn.transform(region));
685 Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
686 iophi->set_req(2, slow_map->i_o());
687 kit.set_i_o(gvn.transform(iophi));
688 kit.merge_memory(slow_map->merged_memory(), region, 2);
689 uint tos = kit.jvms()->stkoff() + kit.sp();
690 uint limit = slow_map->req();
691 for (uint i = TypeFunc::Parms; i < limit; i++) {
692 // Skip unused stack slots; fast forward to monoff();
693 if (i == tos) {
694 i = kit.jvms()->monoff();
695 if( i >= limit ) break;
696 }
697 Node* m = kit.map()->in(i);
|