301 CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
302 if (InlineTree::check_can_parse(m) != NULL) return NULL;
303 return new ParseGenerator(m, expected_uses);
304 }
305
306 // As a special case, the JVMS passed to this CallGenerator is
307 // for the method execution already in progress, not just the JVMS
308 // of the caller. Thus, this CallGenerator cannot be mixed with others!
309 CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
310 if (InlineTree::check_can_parse(m) != NULL) return NULL;
311 float past_uses = m->interpreter_invocation_count();
312 float expected_uses = past_uses;
313 return new ParseGenerator(m, expected_uses, true);
314 }
315
316 CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
317 assert(!m->is_abstract(), "for_direct_call mismatch");
318 return new DirectCallGenerator(m, separate_io_proj);
319 }
320
321 CallGenerator* CallGenerator::for_dynamic_call(ciMethod* m) {
322 assert(m->is_method_handle_invoke() || m->is_method_handle_adapter(), "for_dynamic_call mismatch");
323 return new DynamicCallGenerator(m);
324 }
325
326 CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
327 assert(!m->is_static(), "for_virtual_call mismatch");
328 assert(!m->is_method_handle_invoke(), "should be a direct call");
329 return new VirtualCallGenerator(m, vtable_index);
330 }
331
332 // Allow inlining decisions to be delayed
333 class LateInlineCallGenerator : public DirectCallGenerator {
334 CallGenerator* _inline_cg;
335
336 public:
337 LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
338 DirectCallGenerator(method, true), _inline_cg(inline_cg) {}
339
340 virtual bool is_late_inline() const { return true; }
341
342 // Convert the CallStaticJava into an inline
343 virtual void do_late_inline();
344
345 JVMState* generate(JVMState* jvms) {
346 // Record that this call site should be revisited once the main
347 // parse is finished.
348 Compile::current()->add_late_inline(this);
349
350 // Emit the CallStaticJava and request separate projections so
351 // that the late inlining logic can distinguish between fall
559 log->elem("predicted_call bci='%d' klass='%d'",
560 jvms->bci(), log->identify(_predicted_receiver));
561 }
562
563 receiver = kit.null_check_receiver(method());
564 if (kit.stopped()) {
565 return kit.transfer_exceptions_into_jvms();
566 }
567
568 Node* exact_receiver = receiver; // will get updated in place...
569 Node* slow_ctl = kit.type_check_receiver(receiver,
570 _predicted_receiver, _hit_prob,
571 &exact_receiver);
572
573 SafePointNode* slow_map = NULL;
574 JVMState* slow_jvms;
575 { PreserveJVMState pjvms(&kit);
576 kit.set_control(slow_ctl);
577 if (!kit.stopped()) {
578 slow_jvms = _if_missed->generate(kit.sync_jvms());
579 assert(slow_jvms != NULL, "miss path must not fail to generate");
580 kit.add_exception_states_from(slow_jvms);
581 kit.set_map(slow_jvms->map());
582 if (!kit.stopped())
583 slow_map = kit.stop();
584 }
585 }
586
587 if (kit.stopped()) {
588 // Instance exactly does not matches the desired type.
589 kit.set_jvms(slow_jvms);
590 return kit.transfer_exceptions_into_jvms();
591 }
592
593 // fall through if the instance exactly matches the desired type
594 kit.replace_in_map(receiver, exact_receiver);
595
596 // Make the hot call:
597 JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
598 if (new_jvms == NULL) {
599 // Inline failed, so make a direct call.
665 _if_missed(if_missed),
666 _if_hit(if_hit),
667 _hit_prob(hit_prob)
668 {}
669
670 virtual bool is_inline() const { return _if_hit->is_inline(); }
671 virtual bool is_deferred() const { return _if_hit->is_deferred(); }
672
673 virtual JVMState* generate(JVMState* jvms);
674 };
675
676
677 CallGenerator* CallGenerator::for_predicted_dynamic_call(ciMethodHandle* predicted_method_handle,
678 CallGenerator* if_missed,
679 CallGenerator* if_hit,
680 float hit_prob) {
681 return new PredictedDynamicCallGenerator(predicted_method_handle, if_missed, if_hit, hit_prob);
682 }
683
684
685 CallGenerator* CallGenerator::for_method_handle_inline(Node* method_handle, JVMState* jvms,
686 ciMethod* caller, ciMethod* callee, ciCallProfile profile) {
687 if (method_handle->Opcode() == Op_ConP) {
688 const TypeOopPtr* oop_ptr = method_handle->bottom_type()->is_oopptr();
689 ciObject* const_oop = oop_ptr->const_oop();
690 ciMethodHandle* method_handle = const_oop->as_method_handle();
691
692 // Set the callee to have access to the class and signature in
693 // the MethodHandleCompiler.
694 method_handle->set_callee(callee);
695 method_handle->set_caller(caller);
696 method_handle->set_call_profile(profile);
697
698 // Get an adapter for the MethodHandle.
699 ciMethod* target_method = method_handle->get_method_handle_adapter();
700 if (target_method != NULL) {
701 CallGenerator* cg = Compile::current()->call_generator(target_method, -1, false, jvms, true, PROB_ALWAYS);
702 if (cg != NULL && cg->is_inline())
703 return cg;
704 }
705 } else if (method_handle->Opcode() == Op_Phi && method_handle->req() == 3 &&
706 method_handle->in(1)->Opcode() == Op_ConP && method_handle->in(2)->Opcode() == Op_ConP) {
707 float prob = PROB_FAIR;
708 Node* meth_region = method_handle->in(0);
709 if (meth_region->is_Region() &&
710 meth_region->in(1)->is_Proj() && meth_region->in(2)->is_Proj() &&
711 meth_region->in(1)->in(0) == meth_region->in(2)->in(0) &&
712 meth_region->in(1)->in(0)->is_If()) {
713 // If diamond, so grab the probability of the test to drive the inlining below
714 prob = meth_region->in(1)->in(0)->as_If()->_prob;
715 if (meth_region->in(1)->is_IfTrue()) {
716 prob = 1 - prob;
717 }
718 }
719
720 // selectAlternative idiom merging two constant MethodHandles.
721 // Generate a guard so that each can be inlined. We might want to
722 // do more inputs at later point but this gets the most common
723 // case.
724 CallGenerator* cg1 = for_method_handle_inline(method_handle->in(1), jvms, caller, callee, profile.rescale(1.0 - prob));
725 CallGenerator* cg2 = for_method_handle_inline(method_handle->in(2), jvms, caller, callee, profile.rescale(prob));
726 if (cg1 != NULL && cg2 != NULL) {
727 const TypeOopPtr* oop_ptr = method_handle->in(1)->bottom_type()->is_oopptr();
728 ciObject* const_oop = oop_ptr->const_oop();
729 ciMethodHandle* mh = const_oop->as_method_handle();
730 return new PredictedDynamicCallGenerator(mh, cg2, cg1, prob);
731 }
732 }
733 return NULL;
734 }
735
736
737 CallGenerator* CallGenerator::for_invokedynamic_inline(ciCallSite* call_site, JVMState* jvms,
738 ciMethod* caller, ciMethod* callee, ciCallProfile profile) {
739 ciMethodHandle* method_handle = call_site->get_target();
740
741 // Set the callee to have access to the class and signature in the
742 // MethodHandleCompiler.
743 method_handle->set_callee(callee);
744 method_handle->set_caller(caller);
745 method_handle->set_call_profile(profile);
746
747 // Get an adapter for the MethodHandle.
748 ciMethod* target_method = method_handle->get_invokedynamic_adapter();
749 if (target_method != NULL) {
750 Compile *C = Compile::current();
751 CallGenerator* cg = C->call_generator(target_method, -1, false, jvms, true, PROB_ALWAYS);
752 if (cg != NULL && cg->is_inline()) {
753 // Add a dependence for invalidation of the optimization.
754 if (!call_site->is_constant_call_site()) {
755 C->dependencies()->assert_call_site_target_value(call_site, method_handle);
802 Node* call_site = kit.make_load(kit.control(), call_site_adr, TypeInstPtr::BOTTOM, T_OBJECT, Compile::AliasIdxRaw);
803
804 // Load the target MethodHandle from the CallSite object.
805 Node* target_adr = kit.basic_plus_adr(call_site, call_site, java_lang_invoke_CallSite::target_offset_in_bytes());
806 Node* target_mh = kit.make_load(kit.control(), target_adr, TypeInstPtr::BOTTOM, T_OBJECT);
807
808 // Check if the MethodHandle is still the same.
809 Node* cmp = gvn.transform(new(kit.C, 3) CmpPNode(target_mh, predicted_mh));
810 bol = gvn.transform(new(kit.C, 2) BoolNode(cmp, BoolTest::eq) );
811 }
812 IfNode* iff = kit.create_and_xform_if(kit.control(), bol, _hit_prob, COUNT_UNKNOWN);
813 kit.set_control( gvn.transform(new(kit.C, 1) IfTrueNode (iff)));
814 Node* slow_ctl = gvn.transform(new(kit.C, 1) IfFalseNode(iff));
815
816 SafePointNode* slow_map = NULL;
817 JVMState* slow_jvms;
818 { PreserveJVMState pjvms(&kit);
819 kit.set_control(slow_ctl);
820 if (!kit.stopped()) {
821 slow_jvms = _if_missed->generate(kit.sync_jvms());
822 assert(slow_jvms != NULL, "miss path must not fail to generate");
823 kit.add_exception_states_from(slow_jvms);
824 kit.set_map(slow_jvms->map());
825 if (!kit.stopped())
826 slow_map = kit.stop();
827 }
828 }
829
830 if (kit.stopped()) {
831 // Instance exactly does not matches the desired type.
832 kit.set_jvms(slow_jvms);
833 return kit.transfer_exceptions_into_jvms();
834 }
835
836 // Make the hot call:
837 JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
838 if (new_jvms == NULL) {
839 // Inline failed, so make a direct call.
840 assert(_if_hit->is_inline(), "must have been a failed inline");
841 CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
842 new_jvms = cg->generate(kit.sync_jvms());
|
301 CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
302 if (InlineTree::check_can_parse(m) != NULL) return NULL;
303 return new ParseGenerator(m, expected_uses);
304 }
305
306 // As a special case, the JVMS passed to this CallGenerator is
307 // for the method execution already in progress, not just the JVMS
308 // of the caller. Thus, this CallGenerator cannot be mixed with others!
309 CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
310 if (InlineTree::check_can_parse(m) != NULL) return NULL;
311 float past_uses = m->interpreter_invocation_count();
312 float expected_uses = past_uses;
313 return new ParseGenerator(m, expected_uses, true);
314 }
315
316 CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
317 assert(!m->is_abstract(), "for_direct_call mismatch");
318 return new DirectCallGenerator(m, separate_io_proj);
319 }
320
321 CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
322 assert(!m->is_static(), "for_virtual_call mismatch");
323 assert(!m->is_method_handle_invoke(), "should be a direct call");
324 return new VirtualCallGenerator(m, vtable_index);
325 }
326
327 CallGenerator* CallGenerator::for_dynamic_call(ciMethod* m) {
328 assert(m->is_method_handle_invoke() || m->is_method_handle_adapter(), "for_dynamic_call mismatch");
329 return new DynamicCallGenerator(m);
330 }
331
332 // Allow inlining decisions to be delayed
333 class LateInlineCallGenerator : public DirectCallGenerator {
334 CallGenerator* _inline_cg;
335
336 public:
337 LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
338 DirectCallGenerator(method, true), _inline_cg(inline_cg) {}
339
340 virtual bool is_late_inline() const { return true; }
341
342 // Convert the CallStaticJava into an inline
343 virtual void do_late_inline();
344
345 JVMState* generate(JVMState* jvms) {
346 // Record that this call site should be revisited once the main
347 // parse is finished.
348 Compile::current()->add_late_inline(this);
349
350 // Emit the CallStaticJava and request separate projections so
351 // that the late inlining logic can distinguish between fall
559 log->elem("predicted_call bci='%d' klass='%d'",
560 jvms->bci(), log->identify(_predicted_receiver));
561 }
562
563 receiver = kit.null_check_receiver(method());
564 if (kit.stopped()) {
565 return kit.transfer_exceptions_into_jvms();
566 }
567
568 Node* exact_receiver = receiver; // will get updated in place...
569 Node* slow_ctl = kit.type_check_receiver(receiver,
570 _predicted_receiver, _hit_prob,
571 &exact_receiver);
572
573 SafePointNode* slow_map = NULL;
574 JVMState* slow_jvms;
575 { PreserveJVMState pjvms(&kit);
576 kit.set_control(slow_ctl);
577 if (!kit.stopped()) {
578 slow_jvms = _if_missed->generate(kit.sync_jvms());
579 if (kit.failing())
580 return NULL; // might happen because of NodeCountInliningCutoff
581 assert(slow_jvms != NULL, "must be");
582 kit.add_exception_states_from(slow_jvms);
583 kit.set_map(slow_jvms->map());
584 if (!kit.stopped())
585 slow_map = kit.stop();
586 }
587 }
588
589 if (kit.stopped()) {
590 // Instance exactly does not matches the desired type.
591 kit.set_jvms(slow_jvms);
592 return kit.transfer_exceptions_into_jvms();
593 }
594
595 // fall through if the instance exactly matches the desired type
596 kit.replace_in_map(receiver, exact_receiver);
597
598 // Make the hot call:
599 JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
600 if (new_jvms == NULL) {
601 // Inline failed, so make a direct call.
667 _if_missed(if_missed),
668 _if_hit(if_hit),
669 _hit_prob(hit_prob)
670 {}
671
672 virtual bool is_inline() const { return _if_hit->is_inline(); }
673 virtual bool is_deferred() const { return _if_hit->is_deferred(); }
674
675 virtual JVMState* generate(JVMState* jvms);
676 };
677
678
679 CallGenerator* CallGenerator::for_predicted_dynamic_call(ciMethodHandle* predicted_method_handle,
680 CallGenerator* if_missed,
681 CallGenerator* if_hit,
682 float hit_prob) {
683 return new PredictedDynamicCallGenerator(predicted_method_handle, if_missed, if_hit, hit_prob);
684 }
685
686
687 CallGenerator* CallGenerator::for_method_handle_call(Node* method_handle, JVMState* jvms,
688 ciMethod* caller, ciMethod* callee, ciCallProfile profile) {
689 assert(callee->is_method_handle_invoke() || callee->is_method_handle_adapter(), "for_method_handle_call mismatch");
690 CallGenerator* cg = CallGenerator::for_method_handle_inline(method_handle, jvms, caller, callee, profile);
691 if (cg != NULL)
692 return cg;
693 return CallGenerator::for_direct_call(callee);
694 }
695
// Try to turn a known MethodHandle node into an inlineable call generator.
// Returns NULL if no inline form could be built; for_method_handle_call
// (above) then falls back to a direct call.
CallGenerator* CallGenerator::for_method_handle_inline(Node* method_handle, JVMState* jvms,
                                              ciMethod* caller, ciMethod* callee, ciCallProfile profile) {
  if (method_handle->Opcode() == Op_ConP) {
    // Case 1: the MethodHandle is a compile-time constant oop.
    const TypeOopPtr* oop_ptr = method_handle->bottom_type()->is_oopptr();
    ciObject* const_oop = oop_ptr->const_oop();
    // NOTE(review): this ciMethodHandle* deliberately shadows the Node*
    // parameter of the same name for the rest of this branch.
    ciMethodHandle* method_handle = const_oop->as_method_handle();

    // Set the callee to have access to the class and signature in
    // the MethodHandleCompiler.
    method_handle->set_callee(callee);
    method_handle->set_caller(caller);
    method_handle->set_call_profile(profile);

    // Get an adapter for the MethodHandle.
    ciMethod* target_method = method_handle->get_method_handle_adapter();
    if (target_method != NULL) {
      // Only return the generator if it would actually be inlined.
      CallGenerator* cg = Compile::current()->call_generator(target_method, -1, false, jvms, true, PROB_ALWAYS);
      if (cg != NULL && cg->is_inline())
        return cg;
    }
  } else if (method_handle->Opcode() == Op_Phi && method_handle->req() == 3 &&
             method_handle->in(1)->Opcode() == Op_ConP && method_handle->in(2)->Opcode() == Op_ConP) {
    // Case 2: a two-way Phi merging two constant MethodHandles
    // (the MethodHandles.selectAlternative idiom).
    float prob = PROB_FAIR;
    Node* meth_region = method_handle->in(0);
    if (meth_region->is_Region() &&
        meth_region->in(1)->is_Proj() && meth_region->in(2)->is_Proj() &&
        meth_region->in(1)->in(0) == meth_region->in(2)->in(0) &&
        meth_region->in(1)->in(0)->is_If()) {
      // If diamond, so grab the probability of the test to drive the inlining below
      prob = meth_region->in(1)->in(0)->as_If()->_prob;
      if (meth_region->in(1)->is_IfTrue()) {
        // Phi input 1 arrives via the true projection, so flip the
        // probability to track that input.
        prob = 1 - prob;
      }
    }

    // selectAlternative idiom merging two constant MethodHandles.
    // Generate a guard so that each can be inlined. We might want to
    // do more inputs at later point but this gets the most common
    // case.
    // Recursing through for_method_handle_call (rather than *_inline)
    // ensures a non-inlinable arm degrades to a direct call.
    CallGenerator* cg1 = for_method_handle_call(method_handle->in(1), jvms, caller, callee, profile.rescale(1.0 - prob));
    CallGenerator* cg2 = for_method_handle_call(method_handle->in(2), jvms, caller, callee, profile.rescale(prob));
    if (cg1 != NULL && cg2 != NULL) {
      // Guard on the first constant handle; cg1 is the hit path,
      // cg2 the miss path.
      const TypeOopPtr* oop_ptr = method_handle->in(1)->bottom_type()->is_oopptr();
      ciObject* const_oop = oop_ptr->const_oop();
      ciMethodHandle* mh = const_oop->as_method_handle();
      return new PredictedDynamicCallGenerator(mh, cg2, cg1, prob);
    }
  }
  return NULL;
}
746
747 CallGenerator* CallGenerator::for_invokedynamic_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile) {
748 assert(callee->is_method_handle_invoke() || callee->is_method_handle_adapter(), "for_invokedynamic_call mismatch");
749 // Get the CallSite object.
750 ciBytecodeStream str(caller);
751 str.force_bci(jvms->bci()); // Set the stream to the invokedynamic bci.
752 ciCallSite* call_site = str.get_call_site();
753 CallGenerator* cg = CallGenerator::for_invokedynamic_inline(call_site, jvms, caller, callee, profile);
754 if (cg != NULL)
755 return cg;
756 return CallGenerator::for_dynamic_call(callee);
757 }
758
759 CallGenerator* CallGenerator::for_invokedynamic_inline(ciCallSite* call_site, JVMState* jvms,
760 ciMethod* caller, ciMethod* callee, ciCallProfile profile) {
761 ciMethodHandle* method_handle = call_site->get_target();
762
763 // Set the callee to have access to the class and signature in the
764 // MethodHandleCompiler.
765 method_handle->set_callee(callee);
766 method_handle->set_caller(caller);
767 method_handle->set_call_profile(profile);
768
769 // Get an adapter for the MethodHandle.
770 ciMethod* target_method = method_handle->get_invokedynamic_adapter();
771 if (target_method != NULL) {
772 Compile *C = Compile::current();
773 CallGenerator* cg = C->call_generator(target_method, -1, false, jvms, true, PROB_ALWAYS);
774 if (cg != NULL && cg->is_inline()) {
775 // Add a dependence for invalidation of the optimization.
776 if (!call_site->is_constant_call_site()) {
777 C->dependencies()->assert_call_site_target_value(call_site, method_handle);
824 Node* call_site = kit.make_load(kit.control(), call_site_adr, TypeInstPtr::BOTTOM, T_OBJECT, Compile::AliasIdxRaw);
825
826 // Load the target MethodHandle from the CallSite object.
827 Node* target_adr = kit.basic_plus_adr(call_site, call_site, java_lang_invoke_CallSite::target_offset_in_bytes());
828 Node* target_mh = kit.make_load(kit.control(), target_adr, TypeInstPtr::BOTTOM, T_OBJECT);
829
830 // Check if the MethodHandle is still the same.
831 Node* cmp = gvn.transform(new(kit.C, 3) CmpPNode(target_mh, predicted_mh));
832 bol = gvn.transform(new(kit.C, 2) BoolNode(cmp, BoolTest::eq) );
833 }
834 IfNode* iff = kit.create_and_xform_if(kit.control(), bol, _hit_prob, COUNT_UNKNOWN);
835 kit.set_control( gvn.transform(new(kit.C, 1) IfTrueNode (iff)));
836 Node* slow_ctl = gvn.transform(new(kit.C, 1) IfFalseNode(iff));
837
838 SafePointNode* slow_map = NULL;
839 JVMState* slow_jvms;
840 { PreserveJVMState pjvms(&kit);
841 kit.set_control(slow_ctl);
842 if (!kit.stopped()) {
843 slow_jvms = _if_missed->generate(kit.sync_jvms());
844 if (kit.failing())
845 return NULL; // might happen because of NodeCountInliningCutoff
846 assert(slow_jvms != NULL, "must be");
847 kit.add_exception_states_from(slow_jvms);
848 kit.set_map(slow_jvms->map());
849 if (!kit.stopped())
850 slow_map = kit.stop();
851 }
852 }
853
854 if (kit.stopped()) {
855 // Instance exactly does not matches the desired type.
856 kit.set_jvms(slow_jvms);
857 return kit.transfer_exceptions_into_jvms();
858 }
859
860 // Make the hot call:
861 JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
862 if (new_jvms == NULL) {
863 // Inline failed, so make a direct call.
864 assert(_if_hit->is_inline(), "must have been a failed inline");
865 CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
866 new_jvms = cg->generate(kit.sync_jvms());
|