src/share/vm/opto/callnode.hpp
rev 7690 : 6912521: System.arraycopy works slower than the simple loop for little lengths
Summary: convert small array copies to series of loads and stores
Reviewed-by:
rev 7691 : 6912521: System.arraycopy works slower than the simple loop for little lengths
Summary: convert small array copies to series of loads and stores
Reviewed-by:



  bool is_nested_lock_region(); // Is this Lock nested?
};

//------------------------------Unlock---------------------------------------
// High-level unlock operation
class UnlockNode : public AbstractLockNode {
public:
  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Unlock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // unlock is never a safepoint
  virtual bool        guaranteed_safepoint()  { return false; }
};

class GraphKit;

class ArrayCopyNode : public CallNode {
private:

  // What kind of arraycopy variant is this?
  enum {
    None,            // not set yet
    ArrayCopy,       // System.arraycopy()
    CloneBasic,      // A clone that can be copied by 64 bit chunks
    CloneOop,        // An oop array clone
    CopyOf,          // Arrays.copyOf()
    CopyOfRange      // Arrays.copyOfRange()
  } _kind;

#ifndef PRODUCT
  static const char* _kind_names[CopyOfRange+1];
#endif
  // Is the alloc obtained with
  // AllocateArrayNode::Ideal_array_allocation() tightly coupled
  // (i.e. the arraycopy immediately follows the allocation)?
  // We cache the result of LibraryCallKit::tightly_coupled_allocation
  // here because it's much easier to find whether there's a tightly
  // coupled allocation at parse time than at macro expansion time. At
  // macro expansion time, for every use of the allocation node we
  // would need to figure out whether it happens after the arraycopy (and
  // can be ignored) or between the allocation and the arraycopy. At
  // parse time, it's straightforward because whatever happens after
  // the arraycopy is not parsed yet and so doesn't exist when
  // LibraryCallKit::tightly_coupled_allocation() is called.
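  // For illustration (an assumed example, not taken from this change):
  // a Java sequence such as
  //   byte[] dest = new byte[len];
  //   System.arraycopy(src, 0, dest, 0, len);
  // parses with the allocation immediately followed by the copy, so
  // LibraryCallKit::tightly_coupled_allocation() can recognize it and
  // the node is created with _alloc_tightly_coupled set to true.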
  bool _alloc_tightly_coupled;

  bool _arguments_validated;

  static const TypeFunc* arraycopy_type() {
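    // create input type (domain)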
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[Src]       = TypeInstPtr::BOTTOM;
    fields[SrcPos]    = TypeInt::INT;
    fields[Dest]      = TypeInstPtr::BOTTOM;
    fields[DestPos]   = TypeInt::INT;
    fields[Length]    = TypeInt::INT;
    fields[SrcLen]    = TypeInt::INT;
    fields[DestLen]   = TypeInt::INT;
    fields[SrcKlass]  = TypeKlassPtr::BOTTOM;
    fields[DestKlass] = TypeKlassPtr::BOTTOM;
    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(0);

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

    return TypeFunc::make(domain, range);
  }

  ArrayCopyNode(Compile* C, bool alloc_tightly_coupled);

  int get_count(PhaseGVN *phase) const;
  static const TypePtr* get_address_type(PhaseGVN *phase, Node* n);

  Node* try_clone_instance(PhaseGVN *phase, bool can_reshape, int count);
  bool finish_transform(PhaseGVN *phase, bool can_reshape,
                        Node* ctl, Node *mem);

public:

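  // Call input positions, starting at TypeFunc::Parms: the five
  // System.arraycopy() arguments followed by the source/destination
  // array lengths and klasses.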
  enum {
    Src   = TypeFunc::Parms,
    SrcPos,
    Dest,
    DestPos,
    Length,
    SrcLen,
    DestLen,
    SrcKlass,
    DestKlass,
    ParmLimit
  };

  static ArrayCopyNode* make(GraphKit* kit, bool may_throw,
                             Node* src, Node* src_offset,
                             Node* dest,  Node* dest_offset,
                             Node* length,
                             bool alloc_tightly_coupled,
                             Node* src_klass = NULL, Node* dest_klass = NULL,
                             Node* src_length = NULL, Node* dest_length = NULL);

  void connect_outputs(GraphKit* kit);
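
  // A sketch of a possible creation sequence (illustrative only; kit
  // and the argument nodes are assumed to come from the parser, e.g.
  // in LibraryCallKit::inline_arraycopy):
  //
  //   ArrayCopyNode* ac = ArrayCopyNode::make(kit, true /* may_throw */,
  //                                           src, src_offset,
  //                                           dest, dest_offset,
  //                                           length,
  //                                           false /* alloc_tightly_coupled */);
  //   ac->set_arraycopy(false /* arguments not validated */);
  //   ac->connect_outputs(kit);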

  bool is_arraycopy()             const  { assert(_kind != None, "should be set"); return _kind == ArrayCopy; }
  bool is_arraycopy_validated()   const  { assert(_kind != None, "should be set"); return _kind == ArrayCopy && _arguments_validated; }
  bool is_clonebasic()            const  { assert(_kind != None, "should be set"); return _kind == CloneBasic; }
  bool is_cloneoop()              const  { assert(_kind != None, "should be set"); return _kind == CloneOop; }
  bool is_copyof()                const  { assert(_kind != None, "should be set"); return _kind == CopyOf; }
  bool is_copyofrange()           const  { assert(_kind != None, "should be set"); return _kind == CopyOfRange; }

  void set_arraycopy(bool validated)   { assert(_kind == None, "shouldn't be set yet"); _kind = ArrayCopy; _arguments_validated = validated; }
  void set_clonebasic()                { assert(_kind == None, "shouldn't be set yet"); _kind = CloneBasic; }
  void set_cloneoop()                  { assert(_kind == None, "shouldn't be set yet"); _kind = CloneOop; }
  void set_copyof()                    { assert(_kind == None, "shouldn't be set yet"); _kind = CopyOf; _arguments_validated = false; }
  void set_copyofrange()               { assert(_kind == None, "shouldn't be set yet"); _kind = CopyOfRange; _arguments_validated = false; }
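  // Note: _kind is write-once. The asserts above allow exactly one
  // set_* call per node, after which the is_* queries become valid.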

  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  virtual bool guaranteed_safepoint()  { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  bool is_alloc_tightly_coupled() const { return _alloc_tightly_coupled; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
#endif // SHARE_VM_OPTO_CALLNODE_HPP