2694 // Accumulate any precedence edges
2695 if (mem->in(i) != NULL) {
2696 n->add_prec(mem->in(i));
2697 }
2698 }
2699 // Everything above this point has been processed.
2700 done = true;
2701 }
2702 // Eliminate the previous StoreCM
2703 prev->set_req(MemNode::Memory, mem->in(MemNode::Memory));
2704 assert(mem->outcnt() == 0, "should be dead");
2705 mem->disconnect_inputs(NULL, this);
2706 } else {
2707 prev = mem;
2708 }
2709 mem = prev->in(MemNode::Memory);
2710 }
2711 }
2712 }
2713
// A method handle intrinsic may return a value type in scalarized form
// (its field values spread across return registers) without the exact
// value type being statically known here. Rewrite the graph so that the
// "store_value_type_fields" runtime stub runs immediately after 'call':
// the stub buffers the returned field values into an instance of the
// dynamically returned klass and yields an oop. All users of the call's
// projections are rerouted to the stub call's projections.
void Compile::value_type_return_from_mh_intrinsic(CallNode *call, Final_Reshape_Counts &frc) {
  if (ValueTypeReturnedAsFields &&
      call->is_CallStaticJava() &&
      call->as_CallStaticJava()->method() != NULL &&
      call->as_CallStaticJava()->method()->is_method_handle_intrinsic() &&
      call->proj_out(TypeFunc::Parms) != NULL &&
      call->proj_out(TypeFunc::Parms)->bottom_type()->isa_valuetypeptr()) {
    // A value type is returned from the call but we don't know its
    // type. One of the values being returned is the klass of the
    // value type. We need to allocate a value type instance of that
    // type and initialize it with other values being returned. This
    // is done with the stub call below that we add right after this
    // call.
    Node* ret = call->proj_out(TypeFunc::Parms);
    assert(ret->bottom_type()->is_valuetypeptr()->klass() == env()->___Value_klass(), "unexpected return type from MH intrinsic");
    // Widen the call's outgoing calling-convention range to the stub's
    // domain so that, below, a projection can be created for every
    // register slot that may carry a returned field value.
    const TypeFunc* tf = call->_tf;
    const TypeTuple* domain = OptoRuntime::store_value_type_fields_Type()->domain_cc();
    const TypeFunc* new_tf = TypeFunc::make(tf->domain_sig(), tf->domain_cc(), tf->range_sig(), domain);
    call->_tf = new_tf;

    // Capture the call's current normal and exceptional projections;
    // their users are rerouted to the stub's projections further down.
    CallProjections projs;
    call->extract_projections(&projs, true, true);
    Node* ctl = projs.fallthrough_catchproj;
    Node* mem = projs.fallthrough_memproj;
    Node* io = projs.fallthrough_ioproj;
    Node* ex_ctl = projs.catchall_catchproj;
    Node* ex_mem = projs.catchall_memproj;
    Node* ex_io = projs.catchall_ioproj;
    CallStaticJavaNode* rt_call = new CallStaticJavaNode(OptoRuntime::store_value_type_fields_Type(),
                                                         StubRoutines::store_value_type_fields_to_buf(),
                                                         "store_value_type_fields",
                                                         call->jvms()->bci(),
                                                         TypePtr::BOTTOM);
    Node* out_ctl = new ProjNode(rt_call, TypeFunc::Control);
    Node* out_mem = new ProjNode(rt_call, TypeFunc::Memory);
    Node* out_io = new ProjNode(rt_call, TypeFunc::I_O);
    Node* res = new ProjNode(rt_call, TypeFunc::Parms);

    // The stub call gets its own exception path: a Catch with a
    // fall-through projection (norm) and a catch-all projection (excp).
    Node* catc = new CatchNode(out_ctl, out_io, 2);
    Node* norm = new CatchProjNode(catc, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci);
    Node* excp = new CatchProjNode(catc, CatchProjNode::catch_all_index, CatchProjNode::no_handler_bci);
    // Region 'r' merges the stub's exception path (req 1) with the
    // original call's exception path (req 2, wired below), with phis for
    // the corresponding memory and I/O state. out_mem/out_io are the
    // stub's only memory/I-O projections, so they feed both the normal
    // path and these exception phis.
    Node* r = new RegionNode(3);
    Node* mem_phi = new PhiNode(r, Type::MEMORY, TypePtr::BOTTOM);
    Node* io_phi = new PhiNode(r, Type::ABIO);
    r->init_req(1, excp);
    mem_phi->init_req(1, out_mem);
    io_phi->init_req(1, out_io);

    // Mark the freshly created catch projections as visited so the
    // final-graph-reshaping walk does not later flag them as unreached.
    frc._visited.set(norm->_idx);
    frc._visited.set(excp->_idx);

    // Order matters: reroute every existing user of the old projections
    // to the new nodes first; the old projection nodes themselves are
    // then reused as inputs to the stub call and the merge region below.
    ctl->replace_by(norm);
    mem->replace_by(out_mem);
    io->replace_by(out_io);
    ret->replace_by(res);
    ex_ctl->replace_by(r);
    ex_mem->replace_by(mem_phi);
    ex_io->replace_by(io_phi);

    // Merge the original call's exception state into the region/phis.
    r->init_req(2, ex_ctl);
    mem_phi->init_req(2, ex_mem);
    io_phi->init_req(2, ex_io);

    // The stub call consumes the original call's fall-through state.
    rt_call->init_req(TypeFunc::Control, ctl);
    rt_call->init_req(TypeFunc::Memory, mem);
    rt_call->init_req(TypeFunc::I_O, io);
    rt_call->init_req(TypeFunc::FramePtr, call->in(TypeFunc::FramePtr));
    rt_call->init_req(TypeFunc::ReturnAdr, call->in(TypeFunc::ReturnAdr));

    // First stub argument: the original oop-typed return projection.
    rt_call->init_req(TypeFunc::Parms, ret);
    // We don't know how many values are returned. This assumes the
    // worst case, that all available registers are used.
    for (uint i = TypeFunc::Parms+1; i < domain->cnt(); i++) {
      if (domain->field_at(i) == Type::HALF) {
        // Second half of a long/double register pair: no separate value.
        rt_call->init_req(i, top());
        continue;
      }
      // One projection per potentially-used return register.
      Node* proj = new ProjNode(call, i);
      rt_call->init_req(i, proj);
    }

    // We can safepoint at that new call
    add_safepoint_edges(rt_call, call->jvms());
  }
}
2799
2800 //------------------------------final_graph_reshaping_impl----------------------
2801 // Implement items 1-5 from final_graph_reshaping below.
2802 void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
2803
2804 if ( n->outcnt() == 0 ) return; // dead node
2805 uint nop = n->Opcode();
2806
2807 // Check for 2-input instruction with "last use" on right input.
2808 // Swap to left input. Implements item (2).
2809 if( n->req() == 3 && // two-input instruction
2810 n->in(1)->outcnt() > 1 && // left use is NOT a last use
2811 (!n->in(1)->is_Phi() || n->in(1)->in(2) != n) && // it is not data loop
2812 n->in(2)->outcnt() == 1 &&// right use IS a last use
2813 !n->in(2)->is_Con() ) { // right use is not a constant
2814 // Check for commutative opcode
2815 switch( nop ) {
2816 case Op_AddI: case Op_AddF: case Op_AddD: case Op_AddL:
2817 case Op_MaxI: case Op_MinI:
2818 case Op_MulI: case Op_MulF: case Op_MulD: case Op_MulL:
2894 // Do not count uncommon runtime calls:
2895 // uncommon_trap, _complete_monitor_locking, _complete_monitor_unlocking,
2896 // _new_Java, _new_typeArray, _new_objArray, _rethrow_Java, ...
2897 if( !call->is_CallStaticJava() || !call->as_CallStaticJava()->_name ) {
2898 frc.inc_call_count(); // Count the call site
2899 } else { // See if uncommon argument is shared
2900 Node *n = call->in(TypeFunc::Parms);
2901 int nop = n->Opcode();
2902 // Clone shared simple arguments to uncommon calls, item (1).
2903 if( n->outcnt() > 1 &&
2904 !n->is_Proj() &&
2905 nop != Op_CreateEx &&
2906 nop != Op_CheckCastPP &&
2907 nop != Op_DecodeN &&
2908 nop != Op_DecodeNKlass &&
2909 !n->is_Mem() ) {
2910 Node *x = n->clone();
2911 call->set_req( TypeFunc::Parms, x );
2912 }
2913 }
2914 value_type_return_from_mh_intrinsic(call, frc);
2915 break;
2916 }
2917
2918 case Op_StoreD:
2919 case Op_LoadD:
2920 case Op_LoadD_unaligned:
2921 frc.inc_double_count();
2922 goto handle_mem;
2923 case Op_StoreF:
2924 case Op_LoadF:
2925 frc.inc_float_count();
2926 goto handle_mem;
2927
2928 case Op_StoreCM:
2929 {
2930 // Convert OopStore dependence into precedence edge
2931 Node* prec = n->in(MemNode::OopStore);
2932 n->del_req(MemNode::OopStore);
2933 n->add_prec(prec);
2934 eliminate_redundant_card_marks(n);
3647 if (arg0->is_Type() &&
3648 arg0->as_Type()->type()->higher_equal(TypePtr::NULL_PTR)) {
3649 required_outcnt--;
3650 }
3651 } else if (call->entry_point() == OptoRuntime::new_array_Java() &&
3652 call->req() > TypeFunc::Parms+1 &&
3653 call->is_CallStaticJava()) {
3654 // Check for negative array length. In such case, the optimizer has
3655 // detected that the allocation attempt will always result in an
3656 // exception. There is no fall-through projection of this CatchNode .
3657 Node *arg1 = call->in(TypeFunc::Parms+1);
3658 if (arg1->is_Type() &&
3659 arg1->as_Type()->type()->join(TypeInt::POS)->empty()) {
3660 required_outcnt--;
3661 }
3662 }
3663 }
3664 }
3665 // Recheck with a better notion of 'required_outcnt'
3666 if (n->outcnt() != required_outcnt) {
3667 record_method_not_compilable("malformed control flow");
3668 return true; // Not all targets reachable!
3669 }
3670 }
3671 // Check that I actually visited all kids. Unreached kids
3672 // must be infinite loops.
3673 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++)
3674 if (!frc._visited.test(n->fast_out(j)->_idx)) {
3675 record_method_not_compilable("infinite loop");
3676 return true; // Found unvisited kid; must be unreach
3677 }
3678 }
3679
3680 // If original bytecodes contained a mixture of floats and doubles
3681 // check if the optimizer has made it homogenous, item (3).
3682 if( Use24BitFPMode && Use24BitFP && UseSSE == 0 &&
3683 frc.get_float_count() > 32 &&
3684 frc.get_double_count() == 0 &&
3685 (10 * frc.get_call_count() < frc.get_float_count()) ) {
3686 set_24_bit_selection_and_mode( false, true );
3687 }
3688
3689 set_java_calls(frc.get_java_call_count());
3690 set_inner_loops(frc.get_inner_loop_count());
3691
3692 // No infinite loops, no reason to bail out.
3693 return false;
3694 }
3695
|
2694 // Accumulate any precedence edges
2695 if (mem->in(i) != NULL) {
2696 n->add_prec(mem->in(i));
2697 }
2698 }
2699 // Everything above this point has been processed.
2700 done = true;
2701 }
2702 // Eliminate the previous StoreCM
2703 prev->set_req(MemNode::Memory, mem->in(MemNode::Memory));
2704 assert(mem->outcnt() == 0, "should be dead");
2705 mem->disconnect_inputs(NULL, this);
2706 } else {
2707 prev = mem;
2708 }
2709 mem = prev->in(MemNode::Memory);
2710 }
2711 }
2712 }
2713
2714
2715 //------------------------------final_graph_reshaping_impl----------------------
2716 // Implement items 1-5 from final_graph_reshaping below.
2717 void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
2718
2719 if ( n->outcnt() == 0 ) return; // dead node
2720 uint nop = n->Opcode();
2721
2722 // Check for 2-input instruction with "last use" on right input.
2723 // Swap to left input. Implements item (2).
2724 if( n->req() == 3 && // two-input instruction
2725 n->in(1)->outcnt() > 1 && // left use is NOT a last use
2726 (!n->in(1)->is_Phi() || n->in(1)->in(2) != n) && // it is not data loop
2727 n->in(2)->outcnt() == 1 &&// right use IS a last use
2728 !n->in(2)->is_Con() ) { // right use is not a constant
2729 // Check for commutative opcode
2730 switch( nop ) {
2731 case Op_AddI: case Op_AddF: case Op_AddD: case Op_AddL:
2732 case Op_MaxI: case Op_MinI:
2733 case Op_MulI: case Op_MulF: case Op_MulD: case Op_MulL:
2809 // Do not count uncommon runtime calls:
2810 // uncommon_trap, _complete_monitor_locking, _complete_monitor_unlocking,
2811 // _new_Java, _new_typeArray, _new_objArray, _rethrow_Java, ...
2812 if( !call->is_CallStaticJava() || !call->as_CallStaticJava()->_name ) {
2813 frc.inc_call_count(); // Count the call site
2814 } else { // See if uncommon argument is shared
2815 Node *n = call->in(TypeFunc::Parms);
2816 int nop = n->Opcode();
2817 // Clone shared simple arguments to uncommon calls, item (1).
2818 if( n->outcnt() > 1 &&
2819 !n->is_Proj() &&
2820 nop != Op_CreateEx &&
2821 nop != Op_CheckCastPP &&
2822 nop != Op_DecodeN &&
2823 nop != Op_DecodeNKlass &&
2824 !n->is_Mem() ) {
2825 Node *x = n->clone();
2826 call->set_req( TypeFunc::Parms, x );
2827 }
2828 }
2829 break;
2830 }
2831
2832 case Op_StoreD:
2833 case Op_LoadD:
2834 case Op_LoadD_unaligned:
2835 frc.inc_double_count();
2836 goto handle_mem;
2837 case Op_StoreF:
2838 case Op_LoadF:
2839 frc.inc_float_count();
2840 goto handle_mem;
2841
2842 case Op_StoreCM:
2843 {
2844 // Convert OopStore dependence into precedence edge
2845 Node* prec = n->in(MemNode::OopStore);
2846 n->del_req(MemNode::OopStore);
2847 n->add_prec(prec);
2848 eliminate_redundant_card_marks(n);
3561 if (arg0->is_Type() &&
3562 arg0->as_Type()->type()->higher_equal(TypePtr::NULL_PTR)) {
3563 required_outcnt--;
3564 }
3565 } else if (call->entry_point() == OptoRuntime::new_array_Java() &&
3566 call->req() > TypeFunc::Parms+1 &&
3567 call->is_CallStaticJava()) {
3568 // Check for negative array length. In such case, the optimizer has
3569 // detected that the allocation attempt will always result in an
3570 // exception. There is no fall-through projection of this CatchNode .
3571 Node *arg1 = call->in(TypeFunc::Parms+1);
3572 if (arg1->is_Type() &&
3573 arg1->as_Type()->type()->join(TypeInt::POS)->empty()) {
3574 required_outcnt--;
3575 }
3576 }
3577 }
3578 }
3579 // Recheck with a better notion of 'required_outcnt'
3580 if (n->outcnt() != required_outcnt) {
3581 assert(false, "malformed control flow");
3582 record_method_not_compilable("malformed control flow");
3583 return true; // Not all targets reachable!
3584 }
3585 }
3586 // Check that I actually visited all kids. Unreached kids
3587 // must be infinite loops.
3588 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++)
3589 if (!frc._visited.test(n->fast_out(j)->_idx)) {
3590 record_method_not_compilable("infinite loop");
3591 assert(false, "infinite loop");
3592 return true; // Found unvisited kid; must be unreach
3593 }
3594 }
3595
3596 // If original bytecodes contained a mixture of floats and doubles
3597 // check if the optimizer has made it homogenous, item (3).
3598 if( Use24BitFPMode && Use24BitFP && UseSSE == 0 &&
3599 frc.get_float_count() > 32 &&
3600 frc.get_double_count() == 0 &&
3601 (10 * frc.get_call_count() < frc.get_float_count()) ) {
3602 set_24_bit_selection_and_mode( false, true );
3603 }
3604
3605 set_java_calls(frc.get_java_call_count());
3606 set_inner_loops(frc.get_inner_loop_count());
3607
3608 // No infinite loops, no reason to bail out.
3609 return false;
3610 }
3611
|