  handle_mem:
#ifdef ASSERT
    if( VerifyOptoOopOffsets ) {
      assert( n->is_Mem(), "" );
      MemNode *mem = (MemNode*)n;
      // Check to see if address types have grounded out somehow.
      const TypeInstPtr *tp = mem->in(MemNode::Address)->bottom_type()->isa_instptr();
      assert( !tp || oop_offset_is_sane(tp), "" );
    }
#endif
    break;
  }

  case Op_AddP: {               // Assert sane base pointers
    Node *addp = n->in(AddPNode::Address);
    assert( !addp->is_AddP() ||
            addp->in(AddPNode::Base)->is_top() || // Top OK for allocation
            addp->in(AddPNode::Base) == n->in(AddPNode::Base),
            "Base pointers must match" );
#ifdef _LP64
    if ((UseCompressedOops || UseCompressedKlassPointers) &&
        addp->Opcode() == Op_ConP &&
        addp == n->in(AddPNode::Base) &&
        n->in(AddPNode::Offset)->is_Con()) {
      // Use addressing with narrow klass to load with offset on x86.
      // On SPARC, loading a 32-bit constant and decoding it takes fewer
      // instructions (4) than loading a 64-bit constant (7).
      // Do this transformation here since IGVN will convert ConN back to ConP.
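      // Shape of the rewrite performed below: an AddP using the same ConP
      // constant as base and address,
      //   AddP(ConP #c, ConP #c, #off)
      // becomes
      //   AddP(DecodeN(ConN #c), DecodeN(ConN #c), #off)
      // (DecodeNKlass/ConNKlass for klass pointers), which the matcher can
      // fold into an addressing mode of the form [R12 + narrow_base + disp].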
      const Type* t = addp->bottom_type();
      if (t->isa_oopptr() || t->isa_klassptr()) {
        Node* nn = NULL;

        int op = t->isa_oopptr() ? Op_ConN : Op_ConNKlass;
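
        // Shared constants hang off the Root node in the ideal graph (a
        // constant's control input is root), so scanning root's outputs below
        // finds any existing ConN/ConNKlass of this exact type and avoids
        // creating a duplicate.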
        // Look for existing ConN node of the same exact type.
        Node* r  = root();
        uint cnt = r->outcnt();
        for (uint i = 0; i < cnt; i++) {
          Node* m = r->raw_out(i);
          if (m != NULL && m->Opcode() == op &&
              m->bottom_type()->make_ptr() == t) {
            nn = m;
            break;
          }
        }
        // ... (rest of the Op_AddP case elided in this excerpt: the narrow
        // constant is wrapped in a DecodeN/DecodeNKlass node and installed
        // as the AddP's base and address) ...

  // ... (the excerpt resumes inside
  //   static void final_graph_reshaping_walk(Node_Stack& nstack, Node* root, Final_Reshape_Counts& frc),
  // the iterative depth-first walk that calls final_graph_reshaping_impl;
  // sfpt is its Unique_Node_List of safepoints collected during the walk) ...
  while (true) {
    if (i < cnt) {
      // Place all non-visited non-null inputs onto stack
      Node* m = n->in(i);
      ++i;
      if (m != NULL && !frc._visited.test_set(m->_idx)) {
        if (m->is_SafePoint() && m->as_SafePoint()->jvms() != NULL)
          sfpt.push(m);
        cnt = m->req();
        nstack.push(n, i); // put on stack parent and next input's index
        n = m;
        i = 0;
      }
    } else {
      // Now do post-visit work
      final_graph_reshaping_impl( n, frc );
      if (nstack.is_empty())
        break;             // finished
      n = nstack.node();   // Get node from stack
      cnt = n->req();
      i = nstack.index();
      nstack.pop();        // Shift to the next node on stack
    }
  }
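
  // The loop above is a depth-first graph walk using an explicit Node_Stack
  // rather than recursion, so deep ideal graphs cannot overflow the native
  // stack. Inputs are pushed pre-visit; final_graph_reshaping_impl runs
  // post-visit, only after all of a node's inputs have been reshaped.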

  // Skip next transformation if compressed oops are not used.
  if ((UseCompressedOops && !Matcher::gen_narrow_oop_implicit_null_checks()) ||
      (!UseCompressedOops && !UseCompressedKlassPointers))
    return;
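  // Interpretation of the guard: without compressed oops or compressed klass
  // pointers there are no DecodeN/DecodeNKlass nodes, so there is nothing to
  // skip; with compressed oops the rewrite is presumably only profitable on
  // platforms where a narrow oop can itself serve as an implicit null check
  // (Matcher::gen_narrow_oop_implicit_null_checks()).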

  // Go over safepoint nodes to skip DecodeN/DecodeNKlass nodes for debug edges.
  // This can be done for uncommon traps or for any safepoint/call if the
  // DecodeN/DecodeNKlass node is referenced only in debug info.
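  // This is safe because HotSpot debug info can describe a value as a narrow
  // oop directly (Location::narrowoop): deoptimization reads the compressed
  // value from the frame and decodes it itself, so the wide oop produced by
  // the DecodeN need not be kept live across the safepoint.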
  while (sfpt.size() > 0) {
    n = sfpt.pop();
    JVMState *jvms = n->as_SafePoint()->jvms();
    assert(jvms != NULL, "sanity");
    int start = jvms->debug_start();
    int end   = n->req();
    bool is_uncommon = (n->is_CallStaticJava() &&
                        n->as_CallStaticJava()->uncommon_trap_request() != 0);
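    // For an uncommon trap the skip is done unconditionally: the trap never
    // returns to this compiled code, so referencing the narrow value in its
    // debug info costs nothing on the hot path. For other safepoints it only
    // pays off if the decode is referenced by safepoints alone (checked
    // below), so that skipping the debug edges lets the decode die entirely.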
    for (int j = start; j < end; j++) {
      Node* in = n->in(j);
      if (in->is_DecodeNarrowPtr()) {
        bool safe_to_skip = true;
        if (!is_uncommon) {
          // Is it safe to skip?
          for (uint i = 0; i < in->outcnt(); i++) {
            Node* u = in->raw_out(i);
            // A use that is not a safepoint, or a call that really consumes
            // the decoded oop, keeps the decode alive anyway.
            if (!u->is_SafePoint() ||
                (u->is_Call() && u->as_Call()->has_non_debug_use(in))) {
              safe_to_skip = false;
            }
          }
        }
        // ... (excerpt ends here; when safe_to_skip, the debug edge is
        // rewired to the decode's narrow input: n->set_req(j, in->in(1))) ...