src/share/vm/opto/memnode.cpp

    //            TypeRawPtr::BOTTOM.  Needs to be investigated.
    if (cross_check != NULL &&
        cross_check != TypePtr::BOTTOM &&
        cross_check != TypeRawPtr::BOTTOM) {
      // Recheck the alias index, to see if it has changed (due to a bug).
      Compile* C = Compile::current();
      assert(C->get_alias_index(cross_check) == C->get_alias_index(tp),
             "must stay in the original alias category");
      // The type of the address must be contained in the adr_type,
      // disregarding "null"-ness.
      // (We make an exception for TypeRawPtr::BOTTOM, which is a bit bucket.)
      const TypePtr* tp_notnull = tp->join(TypePtr::NOTNULL)->is_ptr();
      assert(cross_check->meet(tp_notnull) == cross_check->remove_speculative(),
             "real address must not escape from expected memory type");
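      // In lattice terms, the assert above requires that meeting cross_check
      // with the not-null narrowing of tp gives back cross_check itself
      // (speculative parts aside), i.e. tp, disregarding null-ness, already
      // lies within the expected address type.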
    }
    #endif
    return tp;
  }
}

//------------------------adr_phi_is_loop_invariant----------------------------
// A helper function for Ideal_DU_postCCP to check if a Phi in a counted
// loop is loop invariant. Make a quick traversal of Phi and associated
// CastPP nodes, looking to see if they are a closed group within the loop.
bool MemNode::adr_phi_is_loop_invariant(Node* adr_phi, Node* cast) {
  // The idea is that the phi-nest must boil down to only CastPP nodes
  // with the same data. This implies that any path into the loop already
  // includes such a CastPP, and so the original cast, whatever its input,
  // must be covered by an equivalent cast, with an earlier control input.
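  //
  // Illustrative example (not exhaustive, names are hypothetical): if value X
  // flows into the loop and the address phi merges X on the entry path with
  // CastPP(X) on the back edge,
  //
  //     adr_phi = Phi(entry: X, backedge: CastPP(X))
  //
  // then the whole nest boils down to casts of X and the check succeeds; any
  // node in the nest that is neither a Phi nor a ConstraintCast of a value
  // already seen makes the check fail conservatively.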
  ResourceMark rm;

  // The loop entry input of the phi should be the unique dominating
  // node for every Phi/CastPP in the loop.
  Unique_Node_List closure;
  closure.push(adr_phi->in(LoopNode::EntryControl));

  // Add the phi node and the cast to the worklist.
  Unique_Node_List worklist;
  worklist.push(adr_phi);
  if( cast != NULL ){
    if( !cast->is_ConstraintCast() ) return false;
    worklist.push(cast);
  }

  // Begin recursive walk of phi nodes.
  while( worklist.size() ){
    // Take a node off the worklist
    Node *n = worklist.pop();
    if( !closure.member(n) ){
      // Add it to the closure.
      closure.push(n);
      // Make a sanity check to ensure we don't waste too much time here.
      if( closure.size() > 20) return false;
      // This node is OK if:
      //  - it is a cast of an identical value
      //  - or it is a phi node (then we add its inputs to the worklist)
      // Otherwise, the node is not OK, and we presume the cast is not invariant
      if( n->is_ConstraintCast() ){
        worklist.push(n->in(1));
      } else if( n->is_Phi() ) {
        for( uint i = 1; i < n->req(); i++ ) {
          worklist.push(n->in(i));
        }
      } else {
        return false;
      }
    }
  }

  // Quit when the worklist is empty, and we've found no offending nodes.
  return true;
}
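
// (This helper is used below in Ideal_common_DU_postCCP, at the Op_Phi case,
// to decide whether an address may still float above a counted-loop Phi after
// a cast has been skipped.)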

//------------------------------Ideal_DU_postCCP-------------------------------
// Find any cast-away of null-ness and keep its control.  Null cast-aways are
// going away in this pass and we need to make this memory op depend on the
// gating null check.
Node *MemNode::Ideal_DU_postCCP( PhaseCCP *ccp ) {
  return Ideal_common_DU_postCCP(ccp, this, in(MemNode::Address));
}

// I tried to leave the CastPP's in.  This makes the graph more accurate in
// some sense; we get to keep around the knowledge that an oop is not-null
// after some test.  Alas, the CastPP's interfere with GVN (some values are
// the regular oop, some are the CastPP of the oop, all merge at Phi's which
// cannot collapse, etc).  This cost us 10% on SpecJVM, even when I removed
// some of the more trivial cases in the optimizer.  Removing more useless
// Phi's started allowing Loads to illegally float above null checks.  I gave
// up on this approach.  CNC 10/20/2000
// This static method may also be called from outside MemNode (EncodePNode
// calls it).  Only the control edge of the node 'n' might be updated.
Node *MemNode::Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr ) {
  Node *skipped_cast = NULL;
  // Need a null check?  Regular static accesses do not because they are
  // from constant addresses.  Array ops are gated by the range check (which
  // always includes a NULL check).  Just check field ops.
  if( n->in(MemNode::Control) == NULL ) {
    // Scan upwards for the highest location we can place this memory op.
    while( true ) {
      switch( adr->Opcode() ) {

      case Op_AddP:             // No change to NULL-ness, so peek thru AddP's
        adr = adr->in(AddPNode::Base);
        continue;

      case Op_DecodeN:          // No change to NULL-ness, so peek thru
      case Op_DecodeNKlass:
        adr = adr->in(1);
        continue;

      case Op_EncodeP:
      case Op_EncodePKlass:
        // The EncodeP node's control edge could have been set by this method
        // itself, when the EncodeP node depends on a CastPP node.
        //
        // Use its control edge for the memory op, because the EncodeP may go
        // away later when it is folded with a following or preceding DecodeN node.
        if (adr->in(0) == NULL) {
          // Keep looking for cast nodes.
          adr = adr->in(1);
          continue;
        }
        ccp->hash_delete(n);
        n->set_req(MemNode::Control, adr->in(0));
        ccp->hash_insert(n);
        return n;

      case Op_CastPP:
        // If the CastPP is useless, just peek on through it.
        if( ccp->type(adr) == ccp->type(adr->in(1)) ) {
          // Remember the cast that we've peeked through. If we peek
          // through more than one, then we end up remembering the highest
          // one, that is, if in a loop, the one closest to the top.
          skipped_cast = adr;
          adr = adr->in(1);
          continue;
        }
        // CastPP is going away in this pass!  We need this memory op to be
        // control-dependent on the test that is guarding the CastPP.
        ccp->hash_delete(n);
        n->set_req(MemNode::Control, adr->in(0));
        ccp->hash_insert(n);
        return n;

      case Op_Phi:
        // Attempt to float above a Phi to some dominating point.
        if (adr->in(0) != NULL && adr->in(0)->is_CountedLoop()) {
          // If we've already peeked through a Cast (which could have set the
          // control), we can't float above a Phi, because the skipped Cast
          // may not be loop invariant.
          if (adr_phi_is_loop_invariant(adr, skipped_cast)) {
            adr = adr->in(1);
            continue;
          }
        }

        // Intentional fallthrough!

        // No obvious dominating point.  The mem op is pinned below the Phi
        // by the Phi itself.  If the Phi goes away (no true value is merged)
        // then the mem op can float, but not indefinitely.  It must be pinned
        // behind the controls leading to the Phi.
      case Op_CheckCastPP:
        // These usually stick around to change the address type; however, a
        // useless one can be elided, and we still need to pick up a control edge.
        if (adr->in(0) == NULL) {
          // This CheckCastPP node has NO control and is likely useless. But we
          // need to check further up the ancestor chain for a control input to
          // keep the node in place. 4959717.
          skipped_cast = adr;
          adr = adr->in(1);
          continue;
        }
        ccp->hash_delete(n);
        n->set_req(MemNode::Control, adr->in(0));
        ccp->hash_insert(n);
        return n;

        // List of "safe" opcodes; those that implicitly block the memory
        // op below any null check.
      case Op_CastX2P:          // no null checks on native pointers
      case Op_Parm:             // 'this' pointer is not null
      case Op_LoadP:            // Loading from within a klass
      case Op_LoadN:            // Loading from within a klass
      case Op_LoadKlass:        // Loading from within a klass
      case Op_LoadNKlass:       // Loading from within a klass
      case Op_ConP:             // Loading from a klass
      case Op_ConN:             // Loading from a klass
      case Op_ConNKlass:        // Loading from a klass
      case Op_CreateEx:         // Sucking up the guts of an exception oop
      case Op_Con:              // Reading from TLS
      case Op_CMoveP:           // CMoveP is pinned
      case Op_CMoveN:           // CMoveN is pinned
        break;                  // No progress

      case Op_Proj:             // Direct call to an allocation routine
      case Op_SCMemProj:        // Memory state from store conditional ops
#ifdef ASSERT
        {
          assert(adr->as_Proj()->_con == TypeFunc::Parms, "must be return value");
          const Node* call = adr->in(0);
          if (call->is_CallJava()) {
            const CallJavaNode* call_java = call->as_CallJava();
            const TypeTuple *r = call_java->tf()->range();
            assert(r->cnt() > TypeFunc::Parms, "must return value");
            const Type* ret_type = r->field_at(TypeFunc::Parms);
            assert(ret_type && ret_type->isa_ptr(), "must return pointer");
            // We further presume that this is one of
            // new_instance_Java, new_array_Java, or
            // the like, but do not assert for this.
          } else if (call->is_Allocate()) {
            // similar case to new_instance_Java, etc.
          } else if (!call->is_CallLeaf()) {
            // Projections from fetch_oop (OSR) are allowed as well.
            ShouldNotReachHere();
          }
        }
#endif
        break;
      default:
        ShouldNotReachHere();
      }
      break;
    }
  }

  return  NULL;               // No progress
}
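
// For reference, a caller outside MemNode follows the same pattern described
// in the comment above Ideal_common_DU_postCCP.  A minimal sketch (the real
// EncodePNode version lives in another file and may differ in detail):
//
//   Node *EncodePNode::Ideal_DU_postCCP( PhaseCCP *ccp ) {
//     return MemNode::Ideal_common_DU_postCCP(ccp, this, in(1));
//   }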


//=============================================================================
// Should LoadNode::Ideal() attempt to remove control edges?
bool LoadNode::can_remove_control() const {
  return true;
}
uint LoadNode::size_of() const { return sizeof(*this); }
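// Note: Type::cmp() returns zero when the two types are equal, so the
// negation below makes cmp() answer true exactly when the two loads carry
// equal types.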
uint LoadNode::cmp( const Node &n ) const
{ return !Type::cmp( _type, ((LoadNode&)n)._type ); }
const Type *LoadNode::bottom_type() const { return _type; }
uint LoadNode::ideal_reg() const {
  return _type->ideal_reg();
}

#ifndef PRODUCT
void LoadNode::dump_spec(outputStream *st) const {
  MemNode::dump_spec(st);
  if( !Verbose && !WizardMode ) {
    // standard dump does this in Verbose and WizardMode
    st->print(" #"); _type->dump_on(st);
  }



