  return remove_dead_region(phase, can_reshape) ? this : NULL;
}

//------------------------------calling_convention-----------------------------
void StartNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
  Matcher::calling_convention( sig_bt, parm_regs, argcnt, false );
}

//------------------------------Registers--------------------------------------
const RegMask &StartNode::in_RegMask(uint) const {
  return RegMask::Empty;
}

//------------------------------match------------------------------------------
// Construct projections for incoming parameters, and their RegMask info
Node *StartNode::match( const ProjNode *proj, const Matcher *match ) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
  case TypeFunc::FramePtr:
    return new MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
  case TypeFunc::ReturnAdr:
    return new MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
  case TypeFunc::Parms:
  default: {
      uint parm_num = proj->_con - TypeFunc::Parms;
      const Type *t = _domain->field_at(proj->_con);
      if (t->base() == Type::Half)  // 2nd half of Longs and Doubles
        return new ConNode(Type::TOP);
      uint ideal_reg = t->ideal_reg();
      RegMask &rm = match->_calling_convention_mask[parm_num];
      return new MachProjNode(this,proj->_con,rm,ideal_reg);
    }
  }
  return NULL;
}
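
// For illustration (hedged sketch, not from the source): a method taking
// (int, long) produces parameter projections numbered from TypeFunc::Parms.
// The long occupies two signature slots, and the second slot has type
// Type::Half, so it matches to a TOP constant rather than a register:
//
//   Parms+0  ->  MachProjNode(int,  _calling_convention_mask[0])
//   Parms+1  ->  MachProjNode(long, _calling_convention_mask[1])
//   Parms+2  ->  ConNode(Type::TOP)   // 2nd half of the long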

//------------------------------StartOSRNode-----------------------------------
// The method start node for an on-stack-replacement adapter

//------------------------------osr_domain--------------------------------------
const TypeTuple *StartOSRNode::osr_domain() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM;  // address of osr buffer

  return TypeTuple::make(TypeFunc::Parms+1, fields);
}

//=============================================================================
const char * const ParmNode::names[TypeFunc::Parms+1] = {
  "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
};

#ifndef PRODUCT
// ... (ParmNode::dump_spec elided)

void ParmNode::dump_compact_spec(outputStream *st) const {
  if (_con < TypeFunc::Parms) {
    st->print("%s", names[_con]);
  } else {
    st->print("%d:", _con - TypeFunc::Parms);
    // unconditionally dump bottom_type
    bottom_type()->dump_on(st);
  }
}

// For a ParmNode, all immediate inputs and outputs are considered relevant
// both in compact and standard representation.
void ParmNode::related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const {
  this->collect_nodes(in_rel, 1, false, false);
  this->collect_nodes(out_rel, -1, false, false);
}
#endif

uint ParmNode::ideal_reg() const {
  switch( _con ) {
  case TypeFunc::Control  : // fall through
  case TypeFunc::I_O      : // fall through
  case TypeFunc::Memory   : return 0;
  case TypeFunc::FramePtr : // fall through
  case TypeFunc::ReturnAdr: return Op_RegP;
  default                 : assert( _con > TypeFunc::Parms, "" );
    // fall through
  case TypeFunc::Parms    : {
      // Type of argument being passed
      const Type *t = in(0)->as_Start()->_domain->field_at(_con);
      return t->ideal_reg();
    }
  }
  ShouldNotReachHere();
  return 0;
}

//=============================================================================
ReturnNode::ReturnNode(uint edges, Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr ) : Node(edges) {
  init_req(TypeFunc::Control,   cntrl);
  init_req(TypeFunc::I_O,       i_o);
  init_req(TypeFunc::Memory,    memory);
  init_req(TypeFunc::FramePtr,  frameptr);
  init_req(TypeFunc::ReturnAdr, retadr);
}

Node *ReturnNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}

const Type* ReturnNode::Value(PhaseGVN* phase) const {
  return (phase->type(in(TypeFunc::Control)) == Type::TOP)
    ? Type::TOP
    : Type::BOTTOM;
}

// ...

//=============================================================================
const Type* CallNode::Value(PhaseGVN* phase) const {
  if (phase->type(in(0)) == Type::TOP)  return Type::TOP;
  return tf()->range();
}

//------------------------------calling_convention-----------------------------
void CallNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
  // Use the standard compiler calling convention
  Matcher::calling_convention( sig_bt, parm_regs, argcnt, true );
}

//------------------------------match------------------------------------------
// Construct projections for control, I/O, memory-fields, ..., and
// return result(s) along with their RegMask info
Node *CallNode::match( const ProjNode *proj, const Matcher *match ) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);

  case TypeFunc::Parms+1:       // For LONG & DOUBLE returns
    assert(tf()->range()->field_at(TypeFunc::Parms+1) == Type::HALF, "");
    // 2nd half of doubles and longs
    return new MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad);

  case TypeFunc::Parms: {       // Normal returns
    uint ideal_reg = tf()->range()->field_at(TypeFunc::Parms)->ideal_reg();
    OptoRegPair regs = is_CallRuntime()
      ? match->c_return_value(ideal_reg,true)  // Calls into C runtime
      : match->  return_value(ideal_reg,true); // Calls into compiled Java code
    RegMask rm = RegMask(regs.first());
    if( OptoReg::is_valid(regs.second()) )
      rm.Insert( regs.second() );
    return new MachProjNode(this,proj->_con,rm,ideal_reg);
  }

  case TypeFunc::ReturnAdr:
  case TypeFunc::FramePtr:
  default:
    ShouldNotReachHere();
  }
  return NULL;
}
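
// Note (hedged): return_value()/c_return_value() answer an OptoRegPair. On
// targets where a long/double return fits in one register the second half
// comes back as OptoReg::Bad and the mask built above holds a single
// register, while two-register targets get both halves inserted.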

// Do we Match on this edge index or not?  Match no edges
uint CallNode::match_edge(uint idx) const {
  return 0;
}

// ...

// Returns the unique CheckCastPP of a call,
// or 'this' if there are several CheckCastPPs or unexpected uses,
// or NULL if there is none.
Node *CallNode::result_cast() {
  Node *cast = NULL;

  Node *p = proj_out(TypeFunc::Parms);
  if (p == NULL)
    return NULL;

  for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
    Node *use = p->fast_out(i);
    if (use->is_CheckCastPP()) {
      if (cast != NULL) {
        return this;  // more than 1 CheckCastPP
      }
      cast = use;
    } else if (!use->is_Initialize() &&
               !use->is_AddP() &&
               use->Opcode() != Op_MemBarStoreStore) {
      // Expected uses are restricted to a CheckCastPP, an Initialize
      // node, a MemBarStoreStore (clone) and AddP nodes. If we
      // encounter any other use (a Phi node can be seen in rare
      // cases) return this to prevent incorrect optimizations.
      return this;
    }
  }
  return cast;
}
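
// Shape this walk expects around an allocation's result (illustrative
// sketch only; the layout is inferred from the checks above):
//
//   CallNode
//      |
//   Proj(TypeFunc::Parms) --> CheckCastPP                      (unique result cast)
//                        \--> Initialize / AddP / MemBarStoreStore  (also allowed)
//
// Any other user of the Parms projection (e.g. a Phi) makes the result
// ambiguous, and the method conservatively answers 'this'.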

void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts) {
  projs->fallthrough_proj      = NULL;
  projs->fallthrough_catchproj = NULL;
  projs->fallthrough_ioproj    = NULL;
  projs->catchall_ioproj       = NULL;
  projs->catchall_catchproj    = NULL;
  projs->fallthrough_memproj   = NULL;
  projs->catchall_memproj      = NULL;
  projs->resproj               = NULL;

  // ... (loop over this call's projections and the TypeFunc::Control case elided)
        for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
          cpn = cn->fast_out(k)->as_Proj();
          assert(cpn->is_CatchProj(), "must be a CatchProjNode");
          if (cpn->_con == CatchProjNode::fall_through_index)
            projs->fallthrough_catchproj = cpn;
          else {
            assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
            projs->catchall_catchproj = cpn;
          }
        }
      }
      break;
    }
    case TypeFunc::I_O:
      if (pn->_is_io_use)
        projs->catchall_ioproj = pn;
      else
        projs->fallthrough_ioproj = pn;
      for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
        Node* e = pn->out(j);
        if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) {
          assert(projs->exobj == NULL, "only one");
          projs->exobj = e;
        }
      }
      break;
    case TypeFunc::Memory:
      if (pn->_is_io_use)
        projs->catchall_memproj = pn;
      else
        projs->fallthrough_memproj = pn;
      break;
    case TypeFunc::Parms:
      projs->resproj = pn;
      break;
    default:
      assert(false, "unexpected projection from allocation node.");
    }
  }

  // The resproj may not exist because the result could be ignored
  // and the exception object may not exist if an exception handler
  // swallows the exception, but all the others must exist and be found.
  assert(projs->fallthrough_proj != NULL, "must be found");
  do_asserts = do_asserts && !Compile::current()->inlining_incrementally();
  assert(!do_asserts || projs->fallthrough_catchproj != NULL, "must be found");
  assert(!do_asserts || projs->fallthrough_memproj   != NULL, "must be found");
  assert(!do_asserts || projs->fallthrough_ioproj    != NULL, "must be found");
  assert(!do_asserts || projs->catchall_catchproj    != NULL, "must be found");
  if (separate_io_proj) {
    assert(!do_asserts || projs->catchall_memproj    != NULL, "must be found");
    assert(!do_asserts || projs->catchall_ioproj     != NULL, "must be found");
  }
}
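
// Hedged usage sketch (CallProjections is the plain struct initialized
// above; this mirrors how callers typically drive it, but is illustrative):
//
//   CallProjections projs;
//   call->extract_projections(&projs, true /* separate_io_proj */, true);
//   if (projs.fallthrough_memproj != NULL) {
//     // rewire memory users of the normal (non-exceptional) path here
//   }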

Node *CallNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  CallGenerator* cg = generator();
  if (can_reshape && cg != NULL && cg->is_mh_late_inline() && !cg->already_attempted()) {
    // Check whether this MethodHandle call becomes a candidate for inlining.
    ciMethod* callee = cg->method();
    vmIntrinsics::ID iid = callee->intrinsic_id();
    if (iid == vmIntrinsics::_invokeBasic) {
      if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
        phase->C->prepend_late_inline(cg);
        set_generator(NULL);
      }
    } else {
      assert(callee->has_member_arg(), "wrong type of call?");
      if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {
        phase->C->prepend_late_inline(cg);
        set_generator(NULL);
      }
    }
  }
  return SafePointNode::Ideal(phase, can_reshape);
}
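
// Example of the argument positions checked above (hedged): for
// MethodHandle.invokeBasic the method handle receiver itself sits at
// TypeFunc::Parms, while for the linkTo* linkers the trailing MemberName
// argument sits at TypeFunc::Parms + callee->arg_size() - 1; either one
// becoming a constant (Op_ConP) re-opens the call for late inlining.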

bool CallNode::is_call_to_arraycopystub() const {
  if (_name != NULL && strstr(_name, "arraycopy") != NULL) {
    return true;
  }
  return false;
}

//=============================================================================
uint CallJavaNode::size_of() const { return sizeof(*this); }
uint CallJavaNode::cmp( const Node &n ) const {
  CallJavaNode &call = (CallJavaNode&)n;
  return CallNode::cmp(call) && _method == call._method &&
         _override_symbolic_info == call._override_symbolic_info;
}

// ...

//=============================================================================
#ifndef PRODUCT
void CallLeafNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s", _name);
  CallNode::dump_spec(st);
}
#endif

//=============================================================================

void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
  assert(verify_jvms(jvms), "jvms must match");
  int loc = jvms->locoff() + idx;
  if (in(loc)->is_top() && idx > 0 && !c->is_top()) {
    // If current local idx is top then local idx - 1 could
    // be a long/double that needs to be killed since top could
    // represent the 2nd half of the long/double.
    uint ideal = in(loc - 1)->ideal_reg();
    if (ideal == Op_RegD || ideal == Op_RegL) {
      // set other (low index) half to top
      set_req(loc - 1, in(loc));
    }
  }
  set_req(loc, c);
}
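
// Worked example (hedged): suppose locals {2,3} currently hold a long, so
// in(locoff+2) is the long and in(locoff+3) is top. A later
// set_local(jvms, 3, some_int) sees top at slot 3, finds Op_RegL at slot 2,
// and resets slot 2 to top as well -- the long is no longer fully live.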

uint SafePointNode::size_of() const { return sizeof(*this); }
uint SafePointNode::cmp( const Node &n ) const {
  return (&n == this);          // Always fail except on self
}

//-------------------------set_next_exception----------------------------------
void SafePointNode::set_next_exception(SafePointNode* n) {
  assert(n == NULL || n->Opcode() == Op_SafePoint, "correct value for next_exception");
  if (len() == req()) {
    if (n != NULL)  add_prec(n);
  } else {
    set_prec(req(), n);
  }
}

//----------------------------next_exception-----------------------------------
SafePointNode* SafePointNode::next_exception() const {
  if (len() == req()) {
    return NULL;
  } else {
    Node* n = in(req());
    assert(n == NULL || n->Opcode() == Op_SafePoint, "no other uses of prec edges");
    return (SafePointNode*) n;
  }
}
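
// Hedged sketch of consuming the chain built by set_next_exception(); only
// the interface shown above is assumed, and 'head' is illustrative:
//
//   for (SafePointNode* s = head; s != NULL; s = s->next_exception()) {
//     // each s is an Op_SafePoint carrying one pending exception state
//   }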

//------------------------------Ideal------------------------------------------
// Skip over any collapsed Regions
Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}

//------------------------------Identity---------------------------------------
// Remove obviously duplicate safepoints
Node* SafePointNode::Identity(PhaseGVN* phase) {

  // If you have back to back safepoints, remove one
  if( in(TypeFunc::Control)->is_SafePoint() )
    return in(TypeFunc::Control);

  if( in(0)->is_Proj() ) {
    // ...
  }

  return this;
}

#ifndef PRODUCT
void SafePointNode::dump_spec(outputStream *st) const {
  st->print(" SafePoint ");
  _replaced_nodes.dump(st);
}

// The related nodes of a SafePointNode are all data inputs, excluding the
// control boundary, as well as all outputs up to level 2 (to include projection
// nodes and targets). In compact mode, just include inputs up to level 1 and
// outputs as before.
void SafePointNode::related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const {
  if (compact) {
    this->collect_nodes(in_rel, 1, false, false);
  } else {
    this->collect_nodes_in_all_data(in_rel, false);
  }
  this->collect_nodes(out_rel, -2, false, false);
}
#endif

const RegMask &SafePointNode::in_RegMask(uint idx) const {
  if( idx < TypeFunc::Parms )  return RegMask::Empty;
  // Values outside the domain represent debug info
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}
const RegMask &SafePointNode::out_RegMask() const {
  return RegMask::Empty;
}


void SafePointNode::grow_stack(JVMState* jvms, uint grow_by) {
  assert((int)grow_by > 0, "sanity");
  int monoff = jvms->monoff();
  int scloff = jvms->scloff();
  int endoff = jvms->endoff();
  assert(endoff == (int)req(), "no other states or debug info after me");
  Node* top = Compile::current()->top();
  for (uint i = 0; i < grow_by; i++) {
    ins_req(monoff, top);
  }
  jvms->set_monoff(monoff + grow_by);
  jvms->set_scloff(scloff + grow_by);
  jvms->set_endoff(endoff + grow_by);
}
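
// Effect on the JVMState layout (sketch): grow_stack(jvms, 2) inserts two
// top()s at monoff, so the expression-stack region gains room while the
// monitor and scalar-replacement regions shift up by 2:
//
//   before: [ locals | stack       | monitors | scalars ]
//   after:  [ locals | stack, T, T | monitors | scalars ]      (T = top)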

// ...

//=============================================================================
SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
                                                     AllocateNode* alloc,
#endif
                                                     uint first_index,
                                                     uint n_fields) :
  TypeNode(tp, 1), // 1 control input -- seems required. Get from root.
#ifdef ASSERT
  _alloc(alloc),
#endif
  _first_index(first_index),
  _n_fields(n_fields)
{
  init_class_id(Class_SafePointScalarObject);
}

// Do not allow value-numbering for SafePointScalarObject node.
uint SafePointScalarObjectNode::hash() const { return NO_HASH; }
uint SafePointScalarObjectNode::cmp( const Node &n ) const {
  return (&n == this);          // Always fail except on self
}

uint SafePointScalarObjectNode::ideal_reg() const {
  return 0; // No matching to machine instruction
}

const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const {
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}

const RegMask &SafePointScalarObjectNode::out_RegMask() const {
  return RegMask::Empty;
}

uint SafePointScalarObjectNode::match_edge(uint idx) const {
  return 0;
}

SafePointScalarObjectNode*
SafePointScalarObjectNode::clone(Dict* sosn_map) const {
  void* cached = (*sosn_map)[(void*)this];
  if (cached != NULL) {
    return (SafePointScalarObjectNode*)cached;
  }
  SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone();
  sosn_map->Insert((void*)this, (void*)res);
  return res;
}
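
// Hedged usage sketch: callers cloning a whole debug-info chain share one
// Dict across clone() calls, so a scalarized object referenced from several
// JVMS levels is duplicated exactly once (Dict/cmpkey/hashkey are the libadt
// utilities; the exact setup may differ):
//
//   Dict sosn_map(cmpkey, hashkey);
//   SafePointScalarObjectNode* c1 = sosn->clone(&sosn_map);
//   SafePointScalarObjectNode* c2 = sosn->clone(&sosn_map);  // cache hit: c2 == c1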

// ...

//=============================================================================
Node *AllocateArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  // ...

  const Type* type = phase->type(Ideal_length());
  if (type->isa_int() && type->is_int()->_hi < 0) {
    if (can_reshape) {
      PhaseIterGVN *igvn = phase->is_IterGVN();
      // Unreachable fall through path (negative array length),
      // the allocation can only throw so disconnect it.
      Node* proj = proj_out(TypeFunc::Control);
      Node* catchproj = NULL;
      if (proj != NULL) {
        for (DUIterator_Fast imax, i = proj->fast_outs(imax); i < imax; i++) {
          Node *cn = proj->fast_out(i);
          if (cn->is_Catch()) {
            catchproj = cn->as_Multi()->proj_out(CatchProjNode::fall_through_index);
            break;
          }
        }
      }
      if (catchproj != NULL && catchproj->outcnt() > 0 &&
          (catchproj->outcnt() > 1 ||
           catchproj->unique_out()->Opcode() != Op_Halt)) {
        assert(catchproj->is_CatchProj(), "must be a CatchProjNode");
        Node* nproj = catchproj->clone();
        igvn->register_new_node_with_optimizer(nproj);

        Node *frame = new ParmNode( phase->C->start(), TypeFunc::FramePtr );
        frame = phase->transform(frame);
        // Halt & Catch Fire
        Node *halt = new HaltNode( nproj, frame );
        phase->C->root()->add_req(halt);
        phase->transform(halt);

        igvn->replace_node(catchproj, phase->C->top());
        return this;
      }
    } else {
      // Can't correct it during regular GVN so register for IGVN
      phase->C->record_for_igvn(this);
    }
  }
  return NULL;
}
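
// Resulting shape of the rewrite above (illustrative): the unreachable
// fall-through CatchProj is replaced by top, and its clone feeds a Halt
// node hung off Root, so the only surviving successor is the exception path:
//
//   Allocate -> Proj(Control) -> Catch -> CatchProj(fall_through)  == top
//                                  \----> (cloned proj) -> Halt -> Root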

// ...

      }
    } else {
      // see if this lock comes from either half of an if and the
      // predecessors merge unlocks and the other half of the if
      // performs a lock.
      if (find_lock_and_unlock_through_if(ctrl, this, lock_ops)) {
        // found unlock splitting to an if with locks on both branches.
      }
    }

    if (lock_ops.length() > 0) {
      // add ourselves to the list of locks to be eliminated
      lock_ops.append(this);

#ifndef PRODUCT
      if (PrintEliminateLocks) {
        int locks = 0;
        int unlocks = 0;
        for (int i = 0; i < lock_ops.length(); i++) {
          AbstractLockNode* lock = lock_ops.at(i);
          if (lock->Opcode() == Op_Lock)
            locks++;
          else
            unlocks++;
          if (Verbose) {
            lock->dump(1);
          }
        }
        tty->print_cr("***Eliminated %d unlocks and %d locks", unlocks, locks);
      }
#endif

      // Mark each of the identified locks as eliminatable.
      for (int i = 0; i < lock_ops.length(); i++) {
        AbstractLockNode* lock = lock_ops.at(i);

        // Mark it eliminated by coarsening and update any counters
#ifdef ASSERT
        lock->log_lock_optimization(phase->C, "eliminate_lock_set_coarsened");
#endif