57 return remove_dead_region(phase, can_reshape) ? this : NULL;
58 }
59
//------------------------------calling_convention-----------------------------
// Compute the register/stack assignment of the incoming parameters.
// The final 'false' argument selects the incoming side of the convention
// (contrast CallNode::calling_convention, which passes 'true').
void StartNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
  Matcher::calling_convention( sig_bt, parm_regs, argcnt, false );
}
64
//------------------------------Registers--------------------------------------
// Inputs to the Start node carry no register constraints.
const RegMask &StartNode::in_RegMask(uint) const {
  return RegMask::Empty;
}
69
//------------------------------match------------------------------------------
// Construct projections for incoming parameters, and their RegMask info
Node *StartNode::match( const ProjNode *proj, const Matcher *match ) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    // Non-register values: an empty mask with the unmatched-projection opcode.
    return new (match->C) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
  case TypeFunc::FramePtr:
    // The frame pointer uses the platform's C frame-pointer register mask.
    return new (match->C) MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
  case TypeFunc::ReturnAdr:
    // Return-address mask is platform-dependent and supplied by the Matcher.
    return new (match->C) MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
  case TypeFunc::Parms:
  default: {
      // An incoming Java parameter: look up the register(s) assigned to it
      // by the calling convention recorded in the Matcher.
      uint parm_num = proj->_con - TypeFunc::Parms;
      const Type *t = _domain->field_at(proj->_con);
      if (t->base() == Type::Half)  // 2nd half of Longs and Doubles
        return new (match->C) ConNode(Type::TOP);
      uint ideal_reg = t->ideal_reg();
      RegMask &rm = match->_calling_convention_mask[parm_num];
      return new (match->C) MachProjNode(this,proj->_con,rm,ideal_reg);
    }
  }
  return NULL;  // not reached: every case above returns
}
95
96 //------------------------------StartOSRNode----------------------------------
97 // The method start node for an on stack replacement adapter
98
99 //------------------------------osr_domain-----------------------------
100 const TypeTuple *StartOSRNode::osr_domain() {
101 const Type **fields = TypeTuple::fields(2);
102 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // address of osr buffer
103
104 return TypeTuple::make(TypeFunc::Parms+1, fields);
105 }
106
//=============================================================================
// Printable names for the fixed leading projections of a Start/Call node,
// indexed by the TypeFunc constants (Control .. Parms).
const char * const ParmNode::names[TypeFunc::Parms+1] = {
  "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
};
668 const Type *CallNode::Value(PhaseTransform *phase) const {
669 if (phase->type(in(0)) == Type::TOP) return Type::TOP;
670 return tf()->range();
671 }
672
//------------------------------calling_convention-----------------------------
// Compute the register/stack assignment of this call's outgoing arguments.
void CallNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
  // Use the standard compiler calling convention
  // (final 'true' = outgoing side; contrast StartNode, which passes 'false').
  Matcher::calling_convention( sig_bt, parm_regs, argcnt, true );
}
678
679
//------------------------------match------------------------------------------
// Construct projections for control, I/O, memory-fields, ..., and
// return result(s) along with their RegMask info
Node *CallNode::match( const ProjNode *proj, const Matcher *match ) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    // Non-register outputs: empty mask with the unmatched-projection opcode.
    return new (match->C) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);

  case TypeFunc::Parms+1:       // For LONG & DOUBLE returns
    assert(tf()->_range->field_at(TypeFunc::Parms+1) == Type::HALF, "");
    // 2nd half of doubles and longs
    return new (match->C) MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad);

  case TypeFunc::Parms: {       // Normal returns
    uint ideal_reg = tf()->range()->field_at(TypeFunc::Parms)->ideal_reg();
    // Runtime calls and compiled Java calls may use different return registers.
    OptoRegPair regs = is_CallRuntime()
      ? match->c_return_value(ideal_reg,true)  // Calls into C runtime
      : match-> return_value(ideal_reg,true);  // Calls into compiled Java code
    RegMask rm = RegMask(regs.first());
    // A two-register (wide) return value also occupies the second register.
    if( OptoReg::is_valid(regs.second()) )
      rm.Insert( regs.second() );
    return new (match->C) MachProjNode(this,proj->_con,rm,ideal_reg);
  }

  case TypeFunc::ReturnAdr:
  case TypeFunc::FramePtr:
  default:
    // Calls never project a return address or frame pointer.
    ShouldNotReachHere();
  }
  return NULL;  // not reached: every case above returns or aborts
}
713
// Do we Match on this edge index or not? Match no edges
// Calls never match any of their input edges directly; arguments are
// handled through the calling-convention machinery instead.
uint CallNode::match_edge(uint idx) const {
  return 0;
}
718
719 //
720 // Determine whether the call could modify the field of the specified
721 // instance at the specified offset.
722 //
723 bool CallNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) {
1271 // Unreachable fall through path (negative array length),
1272 // the allocation can only throw so disconnect it.
1273 Node* proj = proj_out(TypeFunc::Control);
1274 Node* catchproj = NULL;
1275 if (proj != NULL) {
1276 for (DUIterator_Fast imax, i = proj->fast_outs(imax); i < imax; i++) {
1277 Node *cn = proj->fast_out(i);
1278 if (cn->is_Catch()) {
1279 catchproj = cn->as_Multi()->proj_out(CatchProjNode::fall_through_index);
1280 break;
1281 }
1282 }
1283 }
1284 if (catchproj != NULL && catchproj->outcnt() > 0 &&
1285 (catchproj->outcnt() > 1 ||
1286 catchproj->unique_out()->Opcode() != Op_Halt)) {
1287 assert(catchproj->is_CatchProj(), "must be a CatchProjNode");
1288 Node* nproj = catchproj->clone();
1289 igvn->register_new_node_with_optimizer(nproj);
1290
1291 Node *frame = new (phase->C) ParmNode( phase->C->start(), TypeFunc::FramePtr );
1292 frame = phase->transform(frame);
1293 // Halt & Catch Fire
1294 Node *halt = new (phase->C) HaltNode( nproj, frame );
1295 phase->C->root()->add_req(halt);
1296 phase->transform(halt);
1297
1298 igvn->replace_node(catchproj, phase->C->top());
1299 return this;
1300 }
1301 } else {
1302 // Can't correct it during regular GVN so register for IGVN
1303 phase->C->record_for_igvn(this);
1304 }
1305 }
1306 return NULL;
1307 }
1308
1309 // Retrieve the length from the AllocateArrayNode. Narrow the type with a
1310 // CastII, if appropriate. If we are not allowed to create new nodes, and
1311 // a CastII is appropriate, return NULL.
1312 Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseTransform *phase, bool allow_new_nodes) {
1313 Node *length = in(AllocateNode::ALength);
1314 assert(length != NULL, "length is not null");
1316 const TypeInt* length_type = phase->find_int_type(length);
1317 const TypeAryPtr* ary_type = oop_type->isa_aryptr();
1318
1319 if (ary_type != NULL && length_type != NULL) {
1320 const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type);
1321 if (narrow_length_type != length_type) {
1322 // Assert one of:
1323 // - the narrow_length is 0
1324 // - the narrow_length is not wider than length
1325 assert(narrow_length_type == TypeInt::ZERO ||
1326 length_type->is_con() && narrow_length_type->is_con() &&
1327 (narrow_length_type->_hi <= length_type->_lo) ||
1328 (narrow_length_type->_hi <= length_type->_hi &&
1329 narrow_length_type->_lo >= length_type->_lo),
1330 "narrow type must be narrower than length type");
1331
1332 // Return NULL if new nodes are not allowed
1333 if (!allow_new_nodes) return NULL;
1334 // Create a cast which is control dependent on the initialization to
1335 // propagate the fact that the array length must be positive.
1336 length = new (phase->C) CastIINode(length, narrow_length_type);
1337 length->set_req(0, initialization()->proj_out(0));
1338 }
1339 }
1340
1341 return length;
1342 }
1343
//=============================================================================
// Size of this node as allocated in the compiler arena (used when cloning).
uint LockNode::size_of() const { return sizeof(*this); }
1346
1347 // Redundant lock elimination
1348 //
1349 // There are various patterns of locking where we release and
1350 // immediately reacquire a lock in a piece of code where no operations
1351 // occur in between that would be observable. In those cases we can
1352 // skip releasing and reacquiring the lock without violating any
1353 // fairness requirements. Doing this around a loop could cause a lock
1354 // to be held for a very long time so we concentrate on non-looping
1355 // control flow. We also require that the operations are fully
1356 // redundant meaning that we don't introduce new lock operations on
|
57 return remove_dead_region(phase, can_reshape) ? this : NULL;
58 }
59
//------------------------------calling_convention-----------------------------
// Compute the register/stack assignment of the incoming parameters.
// The final 'false' argument selects the incoming side of the convention
// (contrast CallNode::calling_convention, which passes 'true').
void StartNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
  Matcher::calling_convention( sig_bt, parm_regs, argcnt, false );
}
64
//------------------------------Registers--------------------------------------
// Inputs to the Start node carry no register constraints.
const RegMask &StartNode::in_RegMask(uint) const {
  return RegMask::Empty;
}
69
//------------------------------match------------------------------------------
// Construct projections for incoming parameters, and their RegMask info
Node *StartNode::match( const ProjNode *proj, const Matcher *match ) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    // Non-register values: an empty mask with the unmatched-projection opcode.
    return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
  case TypeFunc::FramePtr:
    // The frame pointer uses the platform's C frame-pointer register mask.
    return new MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
  case TypeFunc::ReturnAdr:
    // Return-address mask is platform-dependent and supplied by the Matcher.
    return new MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
  case TypeFunc::Parms:
  default: {
      // An incoming Java parameter: look up the register(s) assigned to it
      // by the calling convention recorded in the Matcher.
      uint parm_num = proj->_con - TypeFunc::Parms;
      const Type *t = _domain->field_at(proj->_con);
      if (t->base() == Type::Half)  // 2nd half of Longs and Doubles
        return new ConNode(Type::TOP);
      uint ideal_reg = t->ideal_reg();
      RegMask &rm = match->_calling_convention_mask[parm_num];
      return new MachProjNode(this,proj->_con,rm,ideal_reg);
    }
  }
  return NULL;  // not reached: every case above returns
}
95
96 //------------------------------StartOSRNode----------------------------------
97 // The method start node for an on stack replacement adapter
98
99 //------------------------------osr_domain-----------------------------
100 const TypeTuple *StartOSRNode::osr_domain() {
101 const Type **fields = TypeTuple::fields(2);
102 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // address of osr buffer
103
104 return TypeTuple::make(TypeFunc::Parms+1, fields);
105 }
106
//=============================================================================
// Printable names for the fixed leading projections of a Start/Call node,
// indexed by the TypeFunc constants (Control .. Parms).
const char * const ParmNode::names[TypeFunc::Parms+1] = {
  "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
};
668 const Type *CallNode::Value(PhaseTransform *phase) const {
669 if (phase->type(in(0)) == Type::TOP) return Type::TOP;
670 return tf()->range();
671 }
672
//------------------------------calling_convention-----------------------------
// Compute the register/stack assignment of this call's outgoing arguments.
void CallNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
  // Use the standard compiler calling convention
  // (final 'true' = outgoing side; contrast StartNode, which passes 'false').
  Matcher::calling_convention( sig_bt, parm_regs, argcnt, true );
}
678
679
//------------------------------match------------------------------------------
// Construct projections for control, I/O, memory-fields, ..., and
// return result(s) along with their RegMask info
Node *CallNode::match( const ProjNode *proj, const Matcher *match ) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    // Non-register outputs: empty mask with the unmatched-projection opcode.
    return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);

  case TypeFunc::Parms+1:       // For LONG & DOUBLE returns
    assert(tf()->_range->field_at(TypeFunc::Parms+1) == Type::HALF, "");
    // 2nd half of doubles and longs
    return new MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad);

  case TypeFunc::Parms: {       // Normal returns
    uint ideal_reg = tf()->range()->field_at(TypeFunc::Parms)->ideal_reg();
    // Runtime calls and compiled Java calls may use different return registers.
    OptoRegPair regs = is_CallRuntime()
      ? match->c_return_value(ideal_reg,true)  // Calls into C runtime
      : match-> return_value(ideal_reg,true);  // Calls into compiled Java code
    RegMask rm = RegMask(regs.first());
    // A two-register (wide) return value also occupies the second register.
    if( OptoReg::is_valid(regs.second()) )
      rm.Insert( regs.second() );
    return new MachProjNode(this,proj->_con,rm,ideal_reg);
  }

  case TypeFunc::ReturnAdr:
  case TypeFunc::FramePtr:
  default:
    // Calls never project a return address or frame pointer.
    ShouldNotReachHere();
  }
  return NULL;  // not reached: every case above returns or aborts
}
713
// Do we Match on this edge index or not? Match no edges
// Calls never match any of their input edges directly; arguments are
// handled through the calling-convention machinery instead.
uint CallNode::match_edge(uint idx) const {
  return 0;
}
718
719 //
720 // Determine whether the call could modify the field of the specified
721 // instance at the specified offset.
722 //
723 bool CallNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) {
1271 // Unreachable fall through path (negative array length),
1272 // the allocation can only throw so disconnect it.
1273 Node* proj = proj_out(TypeFunc::Control);
1274 Node* catchproj = NULL;
1275 if (proj != NULL) {
1276 for (DUIterator_Fast imax, i = proj->fast_outs(imax); i < imax; i++) {
1277 Node *cn = proj->fast_out(i);
1278 if (cn->is_Catch()) {
1279 catchproj = cn->as_Multi()->proj_out(CatchProjNode::fall_through_index);
1280 break;
1281 }
1282 }
1283 }
1284 if (catchproj != NULL && catchproj->outcnt() > 0 &&
1285 (catchproj->outcnt() > 1 ||
1286 catchproj->unique_out()->Opcode() != Op_Halt)) {
1287 assert(catchproj->is_CatchProj(), "must be a CatchProjNode");
1288 Node* nproj = catchproj->clone();
1289 igvn->register_new_node_with_optimizer(nproj);
1290
1291 Node *frame = new ParmNode( phase->C->start(), TypeFunc::FramePtr );
1292 frame = phase->transform(frame);
1293 // Halt & Catch Fire
1294 Node *halt = new HaltNode( nproj, frame );
1295 phase->C->root()->add_req(halt);
1296 phase->transform(halt);
1297
1298 igvn->replace_node(catchproj, phase->C->top());
1299 return this;
1300 }
1301 } else {
1302 // Can't correct it during regular GVN so register for IGVN
1303 phase->C->record_for_igvn(this);
1304 }
1305 }
1306 return NULL;
1307 }
1308
1309 // Retrieve the length from the AllocateArrayNode. Narrow the type with a
1310 // CastII, if appropriate. If we are not allowed to create new nodes, and
1311 // a CastII is appropriate, return NULL.
1312 Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseTransform *phase, bool allow_new_nodes) {
1313 Node *length = in(AllocateNode::ALength);
1314 assert(length != NULL, "length is not null");
1316 const TypeInt* length_type = phase->find_int_type(length);
1317 const TypeAryPtr* ary_type = oop_type->isa_aryptr();
1318
1319 if (ary_type != NULL && length_type != NULL) {
1320 const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type);
1321 if (narrow_length_type != length_type) {
1322 // Assert one of:
1323 // - the narrow_length is 0
1324 // - the narrow_length is not wider than length
1325 assert(narrow_length_type == TypeInt::ZERO ||
1326 length_type->is_con() && narrow_length_type->is_con() &&
1327 (narrow_length_type->_hi <= length_type->_lo) ||
1328 (narrow_length_type->_hi <= length_type->_hi &&
1329 narrow_length_type->_lo >= length_type->_lo),
1330 "narrow type must be narrower than length type");
1331
1332 // Return NULL if new nodes are not allowed
1333 if (!allow_new_nodes) return NULL;
1334 // Create a cast which is control dependent on the initialization to
1335 // propagate the fact that the array length must be positive.
1336 length = new CastIINode(length, narrow_length_type);
1337 length->set_req(0, initialization()->proj_out(0));
1338 }
1339 }
1340
1341 return length;
1342 }
1343
//=============================================================================
// Size of this node as allocated in the compiler arena (used when cloning).
uint LockNode::size_of() const { return sizeof(*this); }
1346
1347 // Redundant lock elimination
1348 //
1349 // There are various patterns of locking where we release and
1350 // immediately reacquire a lock in a piece of code where no operations
1351 // occur in between that would be observable. In those cases we can
1352 // skip releasing and reacquiring the lock without violating any
1353 // fairness requirements. Doing this around a loop could cause a lock
1354 // to be held for a very long time so we concentrate on non-looping
1355 // control flow. We also require that the operations are fully
1356 // redundant meaning that we don't introduce new lock operations on
|