    // TypeRawPtr::BOTTOM.  Needs to be investigated.
    if (cross_check != NULL &&
        cross_check != TypePtr::BOTTOM &&
        cross_check != TypeRawPtr::BOTTOM) {
      // Recheck the alias index, to see if it has changed (due to a bug).
      Compile* C = Compile::current();
      assert(C->get_alias_index(cross_check) == C->get_alias_index(tp),
             "must stay in the original alias category");
      // The type of the address must be contained in the adr_type,
      // disregarding "null"-ness.
      // (We make an exception for TypeRawPtr::BOTTOM, which is a bit bucket.)
      const TypePtr* tp_notnull = tp->join(TypePtr::NOTNULL)->is_ptr();
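      // In C2's lattice the meet widens toward BOTTOM, so the assert below
      // demands that meeting tp_notnull into cross_check does not widen it:
      // every value of tp_notnull is already a value of cross_check
      // (ignoring cross_check's speculative part).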
      assert(cross_check->meet(tp_notnull) == cross_check->remove_speculative(),
             "real address must not escape from expected memory type");
    }
#endif
    return tp;
  }
}

//------------------------adr_phi_is_loop_invariant----------------------------
// A helper function for Ideal_DU_postCCP to check if a Phi in a counted
// loop is loop invariant.  Make a quick traversal of Phi and associated
// CastPP nodes, looking to see if they are a closed group within the loop.
bool MemNode::adr_phi_is_loop_invariant(Node* adr_phi, Node* cast) {
  // The idea is that the phi-nest must boil down to only CastPP nodes
  // with the same data.  This implies that any path into the loop already
  // includes such a CastPP, and so the original cast, whatever its input,
  // must be covered by an equivalent cast, with an earlier control input.
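  //
  // For illustration (one possible shape, not the only one): a loop phi
  // whose entry input is some base address and whose back-edge input is a
  // CastPP of that same base is loop invariant; the nest closes over the
  // {Phi, CastPP} pair and bottoms out at the dominating entry value.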
  ResourceMark rm;

  // The loop entry input of the phi should be the unique dominating
  // node for every Phi/CastPP in the loop.
  Unique_Node_List closure;
  closure.push(adr_phi->in(LoopNode::EntryControl));
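  // (For a loop phi, input LoopNode::EntryControl is the data value flowing
  // in from outside the loop; seeding the closure with it lets the walk
  // below accept that value without requiring it to be a Phi or CastPP.)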

  // Add the phi node and the cast to the worklist.
  Unique_Node_List worklist;
  worklist.push(adr_phi);
  if( cast != NULL ){
    if( !cast->is_ConstraintCast() ) return false;
    worklist.push(cast);
  }

  // Begin recursive walk of phi nodes.
  while( worklist.size() ){
    // Take a node off the worklist
    Node *n = worklist.pop();
    if( !closure.member(n) ){
      // Add it to the closure.
      closure.push(n);
      // Make a sanity check to ensure we don't waste too much time here.
      if( closure.size() > 20 ) return false;
      // This node is OK if:
      //  - it is a cast of an identical value
      //  - or it is a phi node (then we add its inputs to the worklist)
      // Otherwise, the node is not OK, and we presume the cast is not invariant
      if( n->is_ConstraintCast() ){
        worklist.push(n->in(1));
      } else if( n->is_Phi() ) {
        for( uint i = 1; i < n->req(); i++ ) {
          worklist.push(n->in(i));
        }
      } else {
        return false;
      }
    }
  }

  // Quit when the worklist is empty, and we've found no offending nodes.
  return true;
}

//------------------------------Ideal_DU_postCCP-------------------------------
// Find any cast-away of null-ness and keep its control.  Null cast-aways are
// going away in this pass and we need to make this memory op depend on the
// gating null check.
Node *MemNode::Ideal_DU_postCCP( PhaseCCP *ccp ) {
  return Ideal_common_DU_postCCP(ccp, this, in(MemNode::Address));
}

// I tried to leave the CastPP's in.  This makes the graph more accurate in
// some sense; we get to keep around the knowledge that an oop is not-null
// after some test.  Alas, the CastPP's interfere with GVN (some values are
// the regular oop, some are the CastPP of the oop, all merge at Phi's which
// cannot collapse, etc).  This cost us 10% on SpecJVM, even when I removed
// some of the more trivial cases in the optimizer.  Removing more useless
// Phi's started allowing Loads to illegally float above null checks.  I gave
// up on this approach.  CNC 10/20/2000
// This static method may also be called from outside MemNode (EncodePNode
// calls it).  Only the control edge of the node 'n' might be updated.
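// For instance, EncodePNode forwards here from its own postCCP hook, passing
// its narrow-oop input as the address to scan -- roughly (a sketch of the
// caller, not a definitive copy of its code):
//
//   Node* EncodePNode::Ideal_DU_postCCP(PhaseCCP* ccp) {
//     return MemNode::Ideal_common_DU_postCCP(ccp, this, in(1));
//   }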
Node *MemNode::Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr ) {
  Node *skipped_cast = NULL;
  // Need a null check?  Regular static accesses do not because they are
  // from constant addresses.  Array ops are gated by the range check (which
  // always includes a NULL check).  Just check field ops.
  if( n->in(MemNode::Control) == NULL ) {
    // Scan upwards for the highest location we can place this memory op.
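    // (In other words: climb the address chain until we either find a
    // guarding control edge to pin this op below, or hit a node whose
    // value is known never to be null, in which case no pin is needed.)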
    while( true ) {
      switch( adr->Opcode() ) {

      case Op_AddP:             // No change to NULL-ness, so peek thru AddP's
        adr = adr->in(AddPNode::Base);
        continue;

      case Op_DecodeN:          // No change to NULL-ness, so peek thru
      case Op_DecodeNKlass:
        adr = adr->in(1);
        continue;

      case Op_EncodeP:
      case Op_EncodePKlass:
        // EncodeP node's control edge could be set by this method
        // when EncodeP node depends on CastPP node.
        //
        // Use its control edge for memory op because EncodeP may go away
        // later when it is folded with following or preceding DecodeN node.
        if (adr->in(0) == NULL) {
          // Keep looking for cast nodes.
          adr = adr->in(1);
          continue;
        }
        ccp->hash_delete(n);
        n->set_req(MemNode::Control, adr->in(0));
        ccp->hash_insert(n);
        return n;

      case Op_CastPP:
        // If the CastPP is useless, just peek on through it.
        if( ccp->type(adr) == ccp->type(adr->in(1)) ) {
          // Remember the cast that we've peeked through.  If we peek
          // through more than one, then we end up remembering the highest
          // one, that is, if in a loop, the one closest to the top.
          skipped_cast = adr;
          adr = adr->in(1);
          continue;
        }
        // CastPP is going away in this pass!  We need this memory op to be
        // control-dependent on the test that is guarding the CastPP.
        ccp->hash_delete(n);
        n->set_req(MemNode::Control, adr->in(0));
        ccp->hash_insert(n);
        return n;

      case Op_Phi:
        // Attempt to float above a Phi to some dominating point.
        if (adr->in(0) != NULL && adr->in(0)->is_CountedLoop()) {
          // If we've already peeked through a Cast (which could have set the
          // control), we can't float above a Phi, because the skipped Cast
          // may not be loop invariant.
          if (adr_phi_is_loop_invariant(adr, skipped_cast)) {
            adr = adr->in(1);
            continue;
          }
        }

        // Intentional fallthrough!

        // No obvious dominating point.  The mem op is pinned below the Phi
        // by the Phi itself.  If the Phi goes away (no true value is merged)
        // then the mem op can float, but not indefinitely.  It must be pinned
        // behind the controls leading to the Phi.
      case Op_CheckCastPP:
        // These usually stick around to change the address type; however, a
        // useless one can be elided, and we still need to pick up a control edge.
        if (adr->in(0) == NULL) {
          // This CheckCastPP node has NO control and is likely useless.  But we
          // need to check further up the ancestor chain for a control input
          // to keep the node in place.  4959717.
          skipped_cast = adr;
          adr = adr->in(1);
          continue;
        }
        ccp->hash_delete(n);
        n->set_req(MemNode::Control, adr->in(0));
        ccp->hash_insert(n);
        return n;

        // List of "safe" opcodes; those that implicitly block the memory
        // op below any null check.
      case Op_CastX2P:          // no null checks on native pointers
      case Op_Parm:             // 'this' pointer is not null
      case Op_LoadP:            // Loading from within a klass
      case Op_LoadN:            // Loading from within a klass
      case Op_LoadKlass:        // Loading from within a klass
      case Op_LoadNKlass:       // Loading from within a klass
      case Op_ConP:             // Loading from a klass
      case Op_ConN:             // Loading from a klass
      case Op_ConNKlass:        // Loading from a klass
      case Op_CreateEx:         // Sucking up the guts of an exception oop
      case Op_Con:              // Reading from TLS
      case Op_CMoveP:           // CMoveP is pinned
      case Op_CMoveN:           // CMoveN is pinned
        break;                  // No progress

      case Op_Proj:             // Direct call to an allocation routine
      case Op_SCMemProj:        // Memory state from store conditional ops
#ifdef ASSERT
        {
          assert(adr->as_Proj()->_con == TypeFunc::Parms, "must be return value");
          const Node* call = adr->in(0);
          if (call->is_CallJava()) {
            const CallJavaNode* call_java = call->as_CallJava();
            const TypeTuple *r = call_java->tf()->range();
            assert(r->cnt() > TypeFunc::Parms, "must return value");
            const Type* ret_type = r->field_at(TypeFunc::Parms);
            assert(ret_type && ret_type->isa_ptr(), "must return pointer");
            // We further presume that this is one of
            // new_instance_Java, new_array_Java, or
            // the like, but do not assert for this.
          } else if (call->is_Allocate()) {
            // similar case to new_instance_Java, etc.
          } else if (!call->is_CallLeaf()) {
            // Projections from fetch_oop (OSR) are allowed as well.
            ShouldNotReachHere();
          }
        }
#endif
        break;
      default:
        ShouldNotReachHere();
      }
      break;
    }
  }

  return NULL;                  // No progress
}


//=============================================================================
// Should LoadNode::Ideal() attempt to remove control edges?
bool LoadNode::can_remove_control() const {
  return true;
}
uint LoadNode::size_of() const { return sizeof(*this); }
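// GVN equality for loads: Node::cmp() returns nonzero when two nodes are
// equivalent, whereas Type::cmp() returns nonzero when two types differ,
// hence the negation below.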
uint LoadNode::cmp( const Node &n ) const
{ return !Type::cmp( _type, ((LoadNode&)n)._type ); }
const Type *LoadNode::bottom_type() const { return _type; }
uint LoadNode::ideal_reg() const {
  return _type->ideal_reg();
}

#ifndef PRODUCT
void LoadNode::dump_spec(outputStream *st) const {
  MemNode::dump_spec(st);
  if( !Verbose && !WizardMode ) {
    // standard dump does this in Verbose and WizardMode
    st->print(" #"); _type->dump_on(st);
  }