// NOTE(review): fragment — the enclosing function and the start of this
// case live outside the visible chunk; presumably the tail of the Op_AddP
// arm of C2's final graph reshaping switch (the `addp`, `nn`, `t` locals
// are declared above this view). TODO confirm against the full file.
2794 if (nn != NULL) {
2795 // Decode a narrow oop to match address
2796 // [R12 + narrow_oop_reg<<3 + offset]
2797 if (t->isa_oopptr()) {
// Oop pointer: materialize the decode as a DecodeNNode.
2798 nn = new DecodeNNode(nn, t);
2799 } else {
// Otherwise it is a narrow klass pointer: use the klass flavor.
2800 nn = new DecodeNKlassNode(nn, t);
2801 }
// Rewire the consumer to use the decoded value for both the Base and
// Address inputs of the AddP.
2802 n->set_req(AddPNode::Base, nn);
2803 n->set_req(AddPNode::Address, nn);
// If the old AddP lost its last user, detach its inputs so the dead
// node can be reclaimed.
2804 if (addp->outcnt() == 0) {
2805 addp->disconnect_inputs(NULL, this);
2806 }
2807 }
2808 }
2809 }
2810 #endif
2811 break;
2812 }
2813
2814 #ifdef _LP64
// Op_CastPP (64-bit only): when the CastPP is fed by a DecodeN and the
// platform generates implicit null checks on narrow oops, replace the
// CastPP with a retyped clone of the DecodeN so the matcher can fold the
// decode into the null-checking address expression.
2815 case Op_CastPP:
2816 if (n->in(1)->is_DecodeN() && Matcher::gen_narrow_oop_implicit_null_checks()) {
2817 Node* in1 = n->in(1);
2818 const Type* t = n->bottom_type();
// Clone the DecodeN and give the clone the CastPP's (typically
// narrowed, e.g. non-null) type.
2819 Node* new_in1 = in1->clone();
2820 new_in1->as_DecodeN()->set_type(t);
2821
2822 if (!Matcher::narrow_oop_use_complex_address()) {
2823 //
2824 // x86, ARM and friends can handle 2 adds in addressing mode
2825 // and Matcher can fold a DecodeN node into address by using
2826 // a narrow oop directly and do implicit NULL check in address:
2827 //
2828 // [R12 + narrow_oop_reg<<3 + offset]
2829 // NullCheck narrow_oop_reg
2830 //
2831 // On other platforms (Sparc) we have to keep new DecodeN node and
2832 // use it to do implicit NULL check in address:
2833 //
2834 // decode_not_null narrow_oop_reg, base_reg
2835 // [base_reg + offset]
2836 // NullCheck base_reg
2837 //
2838 // Pin the new DecodeN node to non-null path on these platform (Sparc)
2839 // to keep the information to which NULL check the new DecodeN node
2840 // corresponds to use it as value in implicit_null_check().
2841 //
2842 new_in1->set_req(0, n->in(0));
2843 }
2844
// Replace the CastPP with the retyped decode, then detach the original
// decode if it became dead.
2845 n->subsume_by(new_in1, this);
2846 if (in1->outcnt() == 0) {
2847 in1->disconnect_inputs(NULL, this);
2848 }
2849 }
2850 break;
2851
// Op_CmpP: rewrite pointer compares of decoded narrow pointers to
// compare the narrow inputs directly.
// NOTE(review): this case is cut off at the end of this copy of the
// region — the ConP/NULL handling continues past the visible lines.
2852 case Op_CmpP:
2853 // Do this transformation here to preserve CmpPNode::sub() and
2854 // other TypePtr related Ideal optimizations (for example, ptr nullness).
2855 if (n->in(1)->is_DecodeNarrowPtr() || n->in(2)->is_DecodeNarrowPtr()) {
2856 Node* in1 = n->in(1);
2857 Node* in2 = n->in(2);
// Canonicalize so that in1 is always the DecodeNarrowPtr operand.
2858 if (!in1->is_DecodeNarrowPtr()) {
2859 in2 = in1;
2860 in1 = n->in(2);
2861 }
2862 assert(in1->is_DecodeNarrowPtr(), "sanity");
2863
2864 Node* new_in2 = NULL;
2865 if (in2->is_DecodeNarrowPtr()) {
// Both sides are decodes of the same kind (both oop or both klass):
// compare their narrow inputs directly.
2866 assert(in2->Opcode() == in1->Opcode(), "must be same node type");
2867 new_in2 = in2->in(1);
2868 } else if (in2->Opcode() == Op_ConP) {
2869 const Type* t = in2->bottom_type();
2870 if (t == TypePtr::NULL_PTR) {
// Comparing a decoded oop against the NULL constant; a klass
// decode should never be compared to null.
2871 assert(in1->is_DecodeN(), "compare klass to null?");
// ---- paste/diff artifact: a second, overlapping copy of the same region follows ----
// NOTE(review): this is a byte-identical duplicate of the fragment that
// opens this chunk (embedded lines 2794-2812 repeat) — likely the second
// half of a before/after diff paste. Fragment of the Op_AddP arm; the
// `addp`, `nn`, `t` locals are declared outside the visible lines.
2794 if (nn != NULL) {
2795 // Decode a narrow oop to match address
2796 // [R12 + narrow_oop_reg<<3 + offset]
2797 if (t->isa_oopptr()) {
// Oop pointer: materialize the decode as a DecodeNNode.
2798 nn = new DecodeNNode(nn, t);
2799 } else {
// Otherwise it is a narrow klass pointer: use the klass flavor.
2800 nn = new DecodeNKlassNode(nn, t);
2801 }
// Rewire the consumer to use the decoded value for both the Base and
// Address inputs of the AddP.
2802 n->set_req(AddPNode::Base, nn);
2803 n->set_req(AddPNode::Address, nn);
// Detach the old AddP's inputs if it became dead.
2804 if (addp->outcnt() == 0) {
2805 addp->disconnect_inputs(NULL, this);
2806 }
2807 }
2808 }
2809 }
2810 #endif
2811 break;
2812 }
2813
// Op_CastPP (newer variant of the case above): first convert the
// dependency the CastPP encodes into explicit control/precedence edges
// on its transitive memory users, then either fold the CastPP into a
// retyped DecodeN (64-bit implicit null checks) or remove it entirely.
2814 case Op_CastPP:
2815 {
2816 // Remove CastPP nodes to gain more freedom during scheduling
2817 // but keep the dependency they encode as control or precedence
2818 // edges (if control is set already) on memory operations.
2819 ResourceMark rm;
2820 Unique_Node_List wq;
// Worklist walk over the CastPP's transitive users.
2821 wq.push(n);
2822 for (uint next = 0; next < wq.size(); ++next) {
2823 Node *m = wq.at(next);
2824 for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
2825 Node* use = m->fast_out(i);
2826 if (use->is_Mem() || use->is_EncodeNarrowPtr()) {
// Memory op / encode: record the dependency — as the control
// input when it is still free, otherwise as a precedence edge.
2827 if (use->in(0) == NULL) {
2828 use->set_req(0, n->in(0));
2829 } else {
2830 use->add_prec(n->in(0));
2831 }
2832 } else if (use->in(0) == NULL) {
// Only walk through uncontrolled address/pointer plumbing nodes;
// anything else terminates the walk on this path.
2833 switch(use->Opcode()) {
2834 case Op_AddP:
2835 case Op_DecodeN:
2836 case Op_DecodeNKlass:
2837 case Op_CheckCastPP:
2838 break;
2839 default:
2840 continue;
2841 }
2842 wq.push(use);
2843 }
2844 }
2845 }
2846 }
2847 #ifdef _LP64
// 64-bit with narrow-oop implicit null checks: fold the CastPP into a
// retyped clone of its DecodeN input (same transform as the older
// variant of this case above).
2848 if (n->in(1)->is_DecodeN() && Matcher::gen_narrow_oop_implicit_null_checks()) {
2849 Node* in1 = n->in(1);
2850 const Type* t = n->bottom_type();
2851 Node* new_in1 = in1->clone();
2852 new_in1->as_DecodeN()->set_type(t);
2853
2854 if (!Matcher::narrow_oop_use_complex_address()) {
2855 //
2856 // x86, ARM and friends can handle 2 adds in addressing mode
2857 // and Matcher can fold a DecodeN node into address by using
2858 // a narrow oop directly and do implicit NULL check in address:
2859 //
2860 // [R12 + narrow_oop_reg<<3 + offset]
2861 // NullCheck narrow_oop_reg
2862 //
2863 // On other platforms (Sparc) we have to keep new DecodeN node and
2864 // use it to do implicit NULL check in address:
2865 //
2866 // decode_not_null narrow_oop_reg, base_reg
2867 // [base_reg + offset]
2868 // NullCheck base_reg
2869 //
2870 // Pin the new DecodeN node to non-null path on these platform (Sparc)
2871 // to keep the information to which NULL check the new DecodeN node
2872 // corresponds to use it as value in implicit_null_check().
2873 //
2874 new_in1->set_req(0, n->in(0));
2875 }
2876
2877 n->subsume_by(new_in1, this);
2878 if (in1->outcnt() == 0) {
2879 in1->disconnect_inputs(NULL, this);
2880 }
2881 } else {
2882 #endif
// Dependency already captured above as control/precedence edges, so
// the CastPP itself is now redundant: replace it with its input.
2883 n->subsume_by(n->in(1), this);
2884 if (n->outcnt() == 0) {
2885 n->disconnect_inputs(NULL, this);
2886 }
2887 #ifdef _LP64
2888 }
2889 #endif
2890 break;
2891
2892 #ifdef _LP64
// Op_CmpP (duplicate copy of the case earlier in this chunk, here
// explicitly guarded by _LP64): rewrite pointer compares of decoded
// narrow pointers to compare the narrow inputs directly.
// NOTE(review): cut off at the end of the chunk — the ConP/NULL
// handling continues past the visible lines.
2893 case Op_CmpP:
2894 // Do this transformation here to preserve CmpPNode::sub() and
2895 // other TypePtr related Ideal optimizations (for example, ptr nullness).
2896 if (n->in(1)->is_DecodeNarrowPtr() || n->in(2)->is_DecodeNarrowPtr()) {
2897 Node* in1 = n->in(1);
2898 Node* in2 = n->in(2);
// Canonicalize so that in1 is always the DecodeNarrowPtr operand.
2899 if (!in1->is_DecodeNarrowPtr()) {
2900 in2 = in1;
2901 in1 = n->in(2);
2902 }
2903 assert(in1->is_DecodeNarrowPtr(), "sanity");
2904
2905 Node* new_in2 = NULL;
2906 if (in2->is_DecodeNarrowPtr()) {
// Both sides are decodes of the same kind: compare narrow inputs.
2907 assert(in2->Opcode() == in1->Opcode(), "must be same node type");
2908 new_in2 = in2->in(1);
2909 } else if (in2->Opcode() == Op_ConP) {
2910 const Type* t = in2->bottom_type();
2911 if (t == TypePtr::NULL_PTR) {
// Comparing a decoded oop against NULL; a klass decode should
// never be compared to null.
2912 assert(in1->is_DecodeN(), "compare klass to null?");
// ---- end of chunk: text truncated mid-statement (Op_CmpP case continues in the full file) ----