//------------------------------StartNode--------------------------------------
// The method start node
class StartNode : public MultiNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const TypeTuple *_domain;
  StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
    init_class_id(Class_Start);
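    // Note: a StartNode is its own control input (slot 0 loops back to
    // the node itself); slot 1 takes the root of the graph.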
    init_req(0,this);
    init_req(1,root);
  }
  virtual int Opcode() const;
  virtual bool pinned() const { return true; }
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
  virtual uint ideal_reg() const { return 0; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------StartOSRNode-----------------------------------
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
  StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  virtual int Opcode() const;
  static const TypeTuple *osr_domain();
};


//------------------------------ParmNode---------------------------------------
// Incoming parameters
class ParmNode : public ProjNode {
  // ...

  virtual uint size_of() const { return sizeof(*this); }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s".  But the difference
  // between the start index of the jvmstates of "new_call" and "s" is
  // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
  // corresponds appropriately to "this" in "new_call".  Assumes that
  // "sosn_map" is a map, specific to the translation of "s" to "new_call",
  // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
  SafePointScalarObjectNode* clone(Dict* sosn_map) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};


// Simple container for the outgoing projections of a call.  Useful
// for serious surgery on calls.
class CallProjections {
public:
  Node* fallthrough_proj;
  Node* fallthrough_catchproj;
  Node* fallthrough_memproj;
  Node* fallthrough_ioproj;
  Node* catchall_catchproj;
  Node* catchall_memproj;
  Node* catchall_ioproj;
  Node* exobj;
  uint  nb_resproj;
  Node* resproj[1]; // at least one projection
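  // resproj is declared with a single element but the constructor below
  // initializes nb_resproj entries, so instances are presumably allocated
  // with extra trailing space for the additional projections (see
  // CallNode::extract_projections()).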

  CallProjections(uint nbres) {
    fallthrough_proj = NULL;
    fallthrough_catchproj = NULL;
    fallthrough_memproj = NULL;
    fallthrough_ioproj = NULL;
    catchall_catchproj = NULL;
    catchall_memproj = NULL;
    catchall_ioproj = NULL;
    exobj = NULL;
    nb_resproj = nbres;
    resproj[0] = NULL;
    for (uint i = 1; i < nb_resproj; i++) {
      resproj[i] = NULL;
    }
  }
};
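
// Illustrative sketch (assumed usage, not part of this header): a pass
// doing surgery on a call typically collects all projections at once and
// then walks the result projections, e.g.
//
//   CallProjections* projs = call->extract_projections(true /*separate_io_proj*/);
//   for (uint i = 0; i < projs->nb_resproj; i++) {
//     if (projs->resproj[i] != NULL) {
//       // rewire uses of the i-th result projection here
//     }
//   }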

class CallGenerator;

//------------------------------CallNode---------------------------------------
// Call nodes now subsume the function of debug nodes at callsites, so they
// contain the functionality of a full scope chain of debug nodes.
class CallNode : public SafePointNode {
  friend class VMStructs;

protected:
  bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr *t_oop, PhaseTransform *phase);

public:
  const TypeFunc *_tf;         // Function type
  address _entry_point;        // Address of method being called
  float _cnt;                  // Estimate of number of times called
  CallGenerator* _generator;   // corresponding CallGenerator for some late inline calls
  const char *_name;           // Printable name, if _method is NULL

  CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type)
    : SafePointNode(tf->domain_cc()->cnt(), NULL, adr_type),
      _tf(tf),
      _entry_point(addr),
      _cnt(COUNT_UNKNOWN),
      _generator(NULL),
      _name(NULL)
  {
    init_class_id(Class_Call);
  }

  const TypeFunc* tf() const { return _tf; }
  address entry_point() const { return _entry_point; }
  float cnt() const { return _cnt; }
  CallGenerator* generator() const { return _generator; }

  void set_tf(const TypeFunc* tf) { _tf = tf; }
  void set_entry_point(address p) { _entry_point = p; }
  void set_cnt(float c) { _cnt = c; }
  void set_generator(CallGenerator* cg) { _generator = cg; }

  virtual const Type *bottom_type() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node* Identity(PhaseGVN* phase) { return this; }
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const = 0;
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
  virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
  virtual uint ideal_reg() const { return NotAMachineReg; }
  // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
  // for some macro nodes whose expansion does not have a safepoint on the fast path.
  virtual bool guaranteed_safepoint() { return true; }
  // For macro nodes, the JVMState gets modified during expansion.  If calls
  // use MachConstantBase, it gets modified during matching.  So when cloning
  // the node the JVMState must be cloned.  Default is not to clone.
  virtual void clone_jvms(Compile* C) {
    if (C->needs_clone_jvms() && jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  // Returns true if the call may modify n
  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase);
  // Does this node have a use of n other than in debug information?
  bool has_non_debug_use(Node *n);
  bool has_debug_use(Node *n);
  // Returns the unique CheckCastPP of a call, or the result projection
  // if there are several CheckCastPPs, or NULL if there is none.
  Node *result_cast();
  // Does this node return a pointer?
  bool returns_pointer() const {
    const TypeTuple *r = tf()->range_sig();
    return (!tf()->returns_value_type_as_fields() &&
            r->cnt() > TypeFunc::Parms &&
            r->field_at(TypeFunc::Parms)->isa_ptr());
  }

  // Collect all the interesting edges from a call for use in
  // replacing the call by something else.  Used by macro expansion
  // and the late inlining support.
  CallProjections* extract_projections(bool separate_io_proj, bool do_asserts = true);

  virtual uint match_edge(uint idx) const;

  bool is_call_to_arraycopystub() const;

#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
  virtual void dump_spec(outputStream *st) const;
#endif
};


//------------------------------CallJavaNode-----------------------------------
// Make a static or dynamic subroutine call node using Java calling
// convention.  (The "Java" calling convention is the compiler's calling
// convention, as opposed to the interpreter's or that of native C.)
class CallJavaNode : public CallNode {
  friend class VMStructs;
protected:
  virtual uint cmp( const Node &n ) const;
  // ...
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------CallStaticJavaNode-----------------------------
// Make a direct subroutine call using Java calling convention (for static
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallJavaNode(tf, addr, method, bci) {
    init_class_id(Class_CallStaticJava);
    if (C->eliminate_boxing() && (method != NULL) && method->is_boxing_method()) {
      init_flags(Flag_is_macro);
      C->add_macro_node(this);
    }
    const TypeTuple *r = tf->range_sig();
    if (ValueTypeReturnedAsFields &&
        method != NULL &&
        method->is_method_handle_intrinsic() &&
        r->cnt() > TypeFunc::Parms &&
        r->field_at(TypeFunc::Parms)->isa_oopptr() &&
        r->field_at(TypeFunc::Parms)->is_oopptr()->can_be_value_type()) {
      // Make sure this call is processed by PhaseMacroExpand::expand_mh_intrinsic_return
      init_flags(Flag_is_macro);
      C->add_macro_node(this);
    }

    _is_scalar_replaceable = false;
    _is_non_escaping = false;
  }
  CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, int bci,
                     const TypePtr* adr_type)
    : CallJavaNode(tf, addr, NULL, bci) {
    init_class_id(Class_CallStaticJava);
    // This node calls a runtime stub, which often has narrow memory effects.
    _adr_type = adr_type;
    _is_scalar_replaceable = false;
    _is_non_escaping = false;
    _name = name;
  }

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;

  // If this is an uncommon trap, return the request code, else zero.
  int uncommon_trap_request() const;
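  // Illustrative sketch (assumed usage, not part of this header): the
  // request code packs a Deoptimization reason and action, which a caller
  // can decode, e.g.
  //
  //   int req = call->uncommon_trap_request();
  //   if (req != 0) {
  //     Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(req);
  //     Deoptimization::DeoptAction action = Deoptimization::trap_request_action(req);
  //   }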
  // ...
    init_class_id(Class_CallLeaf);
  }
  virtual int Opcode() const;
  virtual bool guaranteed_safepoint() { return false; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
//------------------------------CallLeafNoFPNode-------------------------------
// CallLeafNode, not using floating point or using it in the same manner as
// the generated code
class CallLeafNoFPNode : public CallLeafNode {
public:
  CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
    : CallLeafNode(tf, addr, name, adr_type)
  {
  }
  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};


//------------------------------Allocate---------------------------------------
// High-level memory allocation
//
// AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
// get expanded into a code sequence containing a call.  Unlike other CallNodes,
// they have 2 memory projections and 2 i_o projections (which are distinguished by
// the _is_io_use flag in the projection.)  This is needed when expanding the node in
// order to differentiate the uses of the projection on the normal control path from
// those on the exception return path.
//
class AllocateNode : public CallNode {
public:
  enum {
    // Output:
    RawAddress = TypeFunc::Parms,  // the newly-allocated raw address
    // Inputs:
    AllocSize = TypeFunc::Parms,   // size (in bytes) of the new object
    KlassNode,                     // type (maybe dynamic) of the obj.
    InitialTest,                   // slow-path test (may be constant)
    ALength,                       // array length (or TOP if none)
    ValueNode,
    DefaultValue,                  // default value in case of a non-flattened value array
    RawDefaultValue,               // same as above, but as a raw machine word
    ParmLimit
  };

  static const TypeFunc* alloc_type(const Type* t) {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[AllocSize]       = TypeInt::POS;
    fields[KlassNode]       = TypeInstPtr::NOTNULL;
    fields[InitialTest]     = TypeInt::BOOL;
    fields[ALength]         = t;  // length (can be a bad length)
    fields[ValueNode]       = Type::BOTTOM;
    fields[DefaultValue]    = TypeInstPtr::NOTNULL;
    fields[RawDefaultValue] = TypeX_X;

    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

    return TypeFunc::make(domain, range);
  }
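
  // Illustrative sketch (assumed usage, not part of this header): an
  // allocation site builds its call type from whatever length type it
  // has at hand, e.g.
  //
  //   const TypeFunc* atype = AllocateNode::alloc_type(TypeInt::INT); // array allocation
  //   const TypeFunc* itype = AllocateNode::alloc_type(Type::TOP);    // instance, no length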

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;
  // True when MemBar for new is redundant with MemBar at initializer exit
  bool _is_allocation_MemBar_redundant;
  bool _larval;

  virtual uint size_of() const; // Size is bigger
  AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
               Node *size, Node *klass_node, Node *initial_test,
               ValueTypeBaseNode* value_node = NULL);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void clone_jvms(Compile* C) {
    if (jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual bool guaranteed_safepoint() { return false; }

  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);

  // allocations do not modify their arguments
  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false; }

  // Pattern-match a possible usage of AllocateNode.
  // Return null if no allocation is recognized.
  // The operand is the pointer produced by the (possible) allocation.
  // It must be a projection of the Allocate or its subsequent CastPP.
  // (Note: This function is defined in file graphKit.cpp, near
  // GraphKit::new_instance/new_array, whose output it recognizes.)
  // The 'ptr' may not have an offset unless the 'offset' argument is given.
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase);

  // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
  // an offset, which is reported back to the caller.
  // (Note: AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase,
                                        intptr_t& offset);
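
  // Illustrative sketch (assumed usage, not part of this header):
  //
  //   intptr_t offset = 0;
  //   AllocateNode* alloc = AllocateNode::Ideal_allocation(adr, phase, offset);
  //   if (alloc != NULL) {
  //     // 'adr' points 'offset' bytes into the freshly allocated object
  //   }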

  // Dig the klass operand out of a (possible) allocation site.
  static Node* Ideal_klass(Node* ptr, PhaseTransform* phase) {
    // ...
  bool maybe_set_complete(PhaseGVN* phase);

  // Return true if the allocation doesn't escape the thread, i.e. its
  // escape state is NoEscape or ArgEscape.  InitializeNode._does_not_escape
  // is true when its allocation's escape state is NoEscape or ArgEscape.
  // If the allocation's InitializeNode is NULL, check the
  // AllocateNode._is_non_escaping flag, which is true when the escape
  // state is NoEscape.
  bool does_not_escape_thread() {
    InitializeNode* init = NULL;
    return _is_non_escaping || (((init = initialization()) != NULL) && init->does_not_escape());
  }

  // If the object doesn't escape in its <init> method and a memory barrier
  // is inserted at the exit of its <init>, the memory barrier for the new is
  // not necessary.  Invoke this method when the MemBar at the exit of the
  // initializer post-dominates the allocation node.
  void compute_MemBar_redundancy(ciMethod* initializer);
  bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }

  Node* make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem, Node* klass_node);
};

//------------------------------AllocateArray---------------------------------
//
// High-level array allocation
//
class AllocateArrayNode : public AllocateNode {
public:
  AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
                    Node* size, Node* klass_node, Node* initial_test,
                    Node* count_val, Node* default_value, Node* raw_default_value)
    : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
                   initial_test)
  {
    init_class_id(Class_AllocateArray);
    set_req(AllocateNode::ALength, count_val);
    init_req(AllocateNode::DefaultValue, default_value);
    init_req(AllocateNode::RawDefaultValue, raw_default_value);
  }
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Dig the length operand out of an array allocation site.
  Node* Ideal_length() {
    return in(AllocateNode::ALength);
  }

  // Dig the length operand out of an array allocation site and narrow the
  // type with a CastII, if necessary
  Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true);

  // Pattern-match a possible usage of AllocateArrayNode.
  // Return null if no allocation is recognized.
  static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL || !allo->is_AllocateArray())
           ? NULL : allo->as_AllocateArray();
  }
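
  // Illustrative sketch (assumed usage, not part of this header):
  //
  //   AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr, phase);
  //   if (alloc != NULL) {
  //     Node* len = alloc->Ideal_length(); // raw ALength input; may still need narrowing
  //   }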
// ...
// 0 - object to lock
// 1 - a BoxLockNode
// 2 - a FastLockNode
//
class LockNode : public AbstractLockNode {
public:

  static const TypeFunc *lock_type() {
    // create input type (domain)
    const Type **fields = TypeTuple::fields(3);
    fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be locked
    fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;    // Address of stack location for lock
    fields[TypeFunc::Parms+2] = TypeInt::BOOL;         // FastLock
    const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields);

    // create result type (range)
    fields = TypeTuple::fields(0);

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

    return TypeFunc::make(domain, range);
  }
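  // Illustrative note: the resulting signature is
  //   (object, box address, fast-lock flag) -> ()
  // i.e. three parameters and an empty result tuple, matching the inputs
  // listed in the class comment above.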

  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Lock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual bool guaranteed_safepoint() { return false; }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void clone_jvms(Compile* C) {
    if (jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }