# include "adfiles/ad_sparc.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_arm
# include "adfiles/ad_arm.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_ppc_32
# include "adfiles/ad_ppc_32.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_ppc_64
# include "adfiles/ad_ppc_64.hpp"
#endif


// -------------------- Compile::mach_constant_base_node -----------------------
// Constant table base node singleton.
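// Note (added commentary): the node is created lazily on first request, so
// compilations that never touch the constant table never allocate it; root()
// becomes its only input, pinning it at the start of the graph.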
MachConstantBaseNode* Compile::mach_constant_base_node() {
  if (_mach_constant_base_node == NULL) {
    _mach_constant_base_node = new (C) MachConstantBaseNode();
    _mach_constant_base_node->add_req(C->root());
  }
  return _mach_constant_base_node;
}


/// Support for intrinsics.

// Return the index at which m must be inserted (or already exists).
// The sort order is by the address of the ciMethod, with is_virtual as minor key.
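// Illustration (added): for ciMethods m1 < m2 by address, the list is ordered
//   (m1, is_virtual=false), (m1, is_virtual=true), (m2, is_virtual=false), ...
// which is exactly the invariant the ASSERT loop below verifies.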
int Compile::intrinsic_insertion_index(ciMethod* m, bool is_virtual) {
#ifdef ASSERT
  for (int i = 1; i < _intrinsics->length(); i++) {
    CallGenerator* cg1 = _intrinsics->at(i-1);
    CallGenerator* cg2 = _intrinsics->at(i);
    assert(cg1->method() != cg2->method()
           ? cg1->method()     < cg2->method()
           : cg1->is_virtual() < cg2->is_virtual(),
           "compiler intrinsics list must stay sorted");
  }

// ...

  // GVN that will be run immediately on new nodes
  uint estimated_size = method()->code_size()*4+64;
  estimated_size = (estimated_size < MINIMUM_NODE_HASH ? MINIMUM_NODE_HASH : estimated_size);
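  // Illustration (added): a method with 100 bytecodes asks for 100*4+64 = 464
  // hash buckets; the clamp above raises anything smaller to MINIMUM_NODE_HASH.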
  PhaseGVN gvn(node_arena(), estimated_size);
  set_initial_gvn(&gvn);

  print_inlining_init();
  { // Scope for timing the parser
    TracePhase t3("parse", &_t_parser, true);

    // Put top into the hash table ASAP.
    initial_gvn()->transform_no_reclaim(top());

    // Set up tf(), start(), and find a CallGenerator.
    CallGenerator* cg = NULL;
    if (is_osr_compilation()) {
      const TypeTuple *domain = StartOSRNode::osr_domain();
      const TypeTuple *range = TypeTuple::make_range(method()->signature());
      init_tf(TypeFunc::make(domain, range));
      StartNode* s = new (this) StartOSRNode(root(), domain);
      initial_gvn()->set_type_bottom(s);
      init_start(s);
      cg = CallGenerator::for_osr(method(), entry_bci());
    } else {
      // Normal case.
      init_tf(TypeFunc::make(method()));
      StartNode* s = new (this) StartNode(root(), tf()->domain());
      initial_gvn()->set_type_bottom(s);
      init_start(s);
      if (method()->intrinsic_id() == vmIntrinsics::_Reference_get && UseG1GC) {
        // With java.lang.ref.Reference.get() we must go through the
        // intrinsic when G1 is enabled - even when get() is the root
        // method of the compile - so that, if necessary, the value in
        // the referent field of the reference object gets recorded by
        // the pre-barrier code.
        // Specifically, if G1 is enabled, the value in the referent
        // field is recorded by the G1 SATB pre-barrier. This will
        // result in the referent being marked live and the reference
        // object being removed from the list of discovered references
        // during reference processing.
        cg = find_intrinsic(method(), false);
      }
      if (cg == NULL) {
        float past_uses = method()->interpreter_invocation_count();
        float expected_uses = past_uses;
        cg = CallGenerator::for_inline(method(), expected_uses);
      }
// ...

// Prepare for a single compilation
void Compile::Init(int aliaslevel) {
  _unique   = 0;
  _regalloc = NULL;

  _tf      = NULL;  // filled in later
  _top     = NULL;  // cached later
  _matcher = NULL;  // filled in later
  _cfg     = NULL;  // filled in later

  set_24_bit_selection_and_mode(Use24BitFP, false);

  _node_note_array = NULL;
  _default_node_notes = NULL;

  _immutable_memory = NULL; // filled in at first inquiry

  // Globally visible Nodes
  // First set TOP to NULL to give safe behavior during creation of RootNode
  set_cached_top_node(NULL);
  set_root(new (this) RootNode());
  // Now that you have a Root to point to, create the real TOP
  set_cached_top_node( new (this) ConNode(Type::TOP) );
  set_recent_alloc(NULL, NULL);

  // Create Debug Information Recorder to record scopes, oopmaps, etc.
  env()->set_oop_recorder(new OopRecorder(env()->arena()));
  env()->set_debug_info(new DebugInformationRecorder(env()->oop_recorder()));
  env()->set_dependencies(new Dependencies(env()));

  _fixed_slots = 0;
  set_has_split_ifs(false);
  set_has_loops(has_method() && method()->has_loops()); // first approximation
  set_has_stringbuilder(false);
  set_has_boxed_value(false);
  _trap_can_recompile = false;  // no traps emitted yet
  _major_progress = true;       // start out assuming good things will happen
  set_has_unsafe_access(false);
  set_max_vector_size(0);
  Copy::zero_to_bytes(_trap_hist, sizeof(_trap_hist));
  set_decompile_count(0);

  set_do_freq_based_layout(BlockLayoutByFrequency || method_has_option("BlockLayoutByFrequency"));
// ...

      if (t->isa_oopptr() || t->isa_klassptr()) {
        Node* nn = NULL;

        int op = t->isa_oopptr() ? Op_ConN : Op_ConNKlass;

        // Look for existing ConN node of the same exact type.
        Node* r  = root();
        uint cnt = r->outcnt();
        for (uint i = 0; i < cnt; i++) {
          Node* m = r->raw_out(i);
          if (m != NULL && m->Opcode() == op &&
              m->bottom_type()->make_ptr() == t) {
            nn = m;
            break;
          }
        }
        if (nn != NULL) {
          // Decode a narrow oop to match address
          // [R12 + narrow_oop_reg<<3 + offset]
          if (t->isa_oopptr()) {
            nn = new (this) DecodeNNode(nn, t);
          } else {
            nn = new (this) DecodeNKlassNode(nn, t);
          }
          n->set_req(AddPNode::Base, nn);
          n->set_req(AddPNode::Address, nn);
          if (addp->outcnt() == 0) {
            addp->disconnect_inputs(NULL, this);
          }
        }
      }
    }
#endif
    break;
  }

#ifdef _LP64
  case Op_CastPP:
    if (n->in(1)->is_DecodeN() && Matcher::gen_narrow_oop_implicit_null_checks()) {
      Node* in1 = n->in(1);
      const Type* t = n->bottom_type();
      Node* new_in1 = in1->clone();
      new_in1->as_DecodeN()->set_type(t);
    // ...

          // on x86:
          //
          //    Load_narrow_oop memory, narrow_oop_reg
          //    Load [R12 + narrow_oop_reg<<3 + offset], val_reg
          //    NullCheck narrow_oop_reg
          //
          // and on sparc:
          //
          //    Load_narrow_oop memory, narrow_oop_reg
          //    decode_not_null narrow_oop_reg, base_reg
          //    Load [base_reg + offset], val_reg
          //    NullCheck base_reg
          //
        } else if (t->isa_oopptr()) {
          new_in2 = ConNode::make(this, t->make_narrowoop());
        } else if (t->isa_klassptr()) {
          new_in2 = ConNode::make(this, t->make_narrowklass());
        }
      }
      if (new_in2 != NULL) {
        Node* cmpN = new (this) CmpNNode(in1->in(1), new_in2);
        n->subsume_by(cmpN, this);
        if (in1->outcnt() == 0) {
          in1->disconnect_inputs(NULL, this);
        }
        if (in2->outcnt() == 0) {
          in2->disconnect_inputs(NULL, this);
        }
      }
    }
    break;

  case Op_DecodeN:
  case Op_DecodeNKlass:
    assert(!n->in(1)->is_EncodeNarrowPtr(), "should be optimized out");
    // DecodeN could be pinned when it can't be folded into
    // an address expression; see the code for Op_CastPP above.
    assert(n->in(0) == NULL || (UseCompressedOops && !Matcher::narrow_oop_use_complex_address()), "no control");
    break;

  case Op_EncodeP:
    // ...
      if (unique_in != NULL) {
        n->subsume_by(unique_in, this);
      }
    }
    break;

#endif

  case Op_ModI:
    if (UseDivMod) {
      // Check if a%b and a/b both exist
      Node* d = n->find_similar(Op_DivI);
      if (d) {
        // Replace them with a fused divmod if supported
        if (Matcher::has_match_rule(Op_DivModI)) {
          DivModINode* divmod = DivModINode::make(this, n);
          d->subsume_by(divmod->div_proj(), this);
          n->subsume_by(divmod->mod_proj(), this);
        } else {
          // replace a%b with a-((a/b)*b)
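          // Illustration (added): for a = 7, b = 3 the graph computes
          // 7/3 = 2, then 2*3 = 6, then 7-6 = 1, which equals 7 % 3.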
          Node* mult = new (this) MulINode(d, d->in(2));
          Node* sub  = new (this) SubINode(d->in(1), mult);
          n->subsume_by(sub, this);
        }
      }
    }
    break;

  case Op_ModL:
    if (UseDivMod) {
      // Check if a%b and a/b both exist
      Node* d = n->find_similar(Op_DivL);
      if (d) {
        // Replace them with a fused divmod if supported
        if (Matcher::has_match_rule(Op_DivModL)) {
          DivModLNode* divmod = DivModLNode::make(this, n);
          d->subsume_by(divmod->div_proj(), this);
          n->subsume_by(divmod->mod_proj(), this);
        } else {
          // replace a%b with a-((a/b)*b)
          Node* mult = new (this) MulLNode(d, d->in(2));
          Node* sub  = new (this) SubLNode(d->in(1), mult);
          n->subsume_by(sub, this);
        }
      }
    }
    break;

  case Op_LoadVector:
  case Op_StoreVector:
    break;

  case Op_PackB:
  case Op_PackS:
  case Op_PackI:
  case Op_PackF:
  case Op_PackL:
  case Op_PackD:
    if (n->req()-1 > 2) {
      // Replace many-operand PackNodes with a binary tree for matching
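      // Illustration (added): a 4-operand PackI (a,b,c,d) is rebuilt roughly
      // as two 2-operand packs combined pairwise, ((a,b),(c,d)), so the
      // matcher only ever has to handle binary pack nodes.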
      PackNode* p = (PackNode*) n;
      Node* btp = p->binary_tree_pack(this, 1, n->req());
      // ...
    break;
  case Op_LShiftI:
  case Op_RShiftI:
  case Op_URShiftI:
  case Op_LShiftL:
  case Op_RShiftL:
  case Op_URShiftL:
    if (Matcher::need_masked_shift_count) {
      // The CPU's shift instructions don't restrict the count to the
      // lower 5/6 bits. We need to do the masking ourselves.
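      // Illustration (added): an int shift by a constant 33 is rewritten
      // below to shift by 33 & 31 = 1; a non-constant count that might
      // exceed the mask gets an explicit AndI with the mask instead.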
      Node* in2 = n->in(2);
      juint mask = (n->bottom_type() == TypeInt::INT) ? (BitsPerInt - 1) : (BitsPerLong - 1);
      const TypeInt* t = in2->find_int_type();
      if (t != NULL && t->is_con()) {
        juint shift = t->get_con();
        if (shift > mask) { // Unsigned cmp
          n->set_req(2, ConNode::make(this, TypeInt::make(shift & mask)));
        }
      } else {
        if (t == NULL || t->_lo < 0 || t->_hi > (int)mask) {
          Node* shift = new (this) AndINode(in2, ConNode::make(this, TypeInt::make(mask)));
          n->set_req(2, shift);
        }
      }
      if (in2->outcnt() == 0) { // Remove dead node
        in2->disconnect_inputs(NULL, this);
      }
    }
    break;
  case Op_MemBarStoreStore:
  case Op_MemBarRelease:
    // Break the link with AllocateNode: it is no longer useful and
    // confuses register allocation.
    if (n->req() > MemBarNode::Precedent) {
      n->set_req(MemBarNode::Precedent, top());
    }
    break;
  default:
    assert( !n->is_Call(), "" );
    assert( !n->is_Mem(), "" );
    break;