76
// NOTE(review): fragment — the enclosing function's signature is above this
// view (apparently a spill-copy factory, cf. PhaseChaitin::get_spillcopy_wide);
// w_mask, o_mask, w_i_mask, w_o_mask, ireg, def and spill_type are defined there.
77 int num_regs = RegMask::num_registers(ireg);
78 bool is_vect = RegMask::is_vector(ireg);
// Prefer the wide (ideal) mask for the copy's output when it can serve the
// use: the masks overlap AND the value is a single register, a vector
// (asserted aligned below), or an aligned scalar pair.
79 if( w_mask->overlap( *o_mask ) && // Overlap AND
80 ((num_regs == 1) // Single use or aligned
81 || is_vect // or vector
// NOTE(review): '&&' binds tighter than '||' (intended here), but the
// unparenthesized mixed-operator arm typically trips -Wparentheses;
// consider '(!is_vect && o_mask->is_aligned_pairs())'.
82 || !is_vect && o_mask->is_aligned_pairs()) ) {
83 assert(!is_vect || o_mask->is_aligned_sets(num_regs), "vectors are aligned");
84 // Don't come here for mis-aligned doubles
85 w_o_mask = w_mask;
86 } else { // wide ideal mask does not overlap with o_mask
87 // Mis-aligned doubles come here and XMM->FPR moves on x86.
88 w_o_mask = o_mask; // Must target desired registers
89 // Does the ideal-reg-mask overlap with o_mask? I.e., can I use
90 // a reg-reg move or do I need a trip across register classes
91 // (and thus through memory)?
92 if( !C->matcher()->idealreg2regmask[ireg]->overlap( *o_mask) && o_mask->is_UP() )
93 // Here we assume a trip through memory is required.
94 w_i_mask = &C->FIRST_STACK_mask();
95 }
// Build the spill copy from the chosen input mask to the chosen output mask.
// 'new (C)' is presumably Compile-arena placement new — confirm against the
// project's allocation conventions.
96 return new (C) MachSpillCopyNode(spill_type, def, *w_i_mask, *w_o_mask );
97 }
98
99 //------------------------------insert_proj------------------------------------
100 // Insert the spill at chosen location. Skip over any intervening Proj's or
101 // Phis. Skip over a CatchNode and projs, inserting in the fall-through block
102 // instead. Update high-pressure indices. Create a new live range.
// NOTE(review): body truncated in this view — the source numbering jumps from
// line 116 to line 646 below, so the remainder of insert_proj is not visible.
103 void PhaseChaitin::insert_proj( Block *b, uint i, Node *spill, uint maxlrg ) {
104 // Skip intervening ProjNodes. Do not insert between a ProjNode and
105 // its definer.
106 while( i < b->number_of_nodes() &&
107 (b->get_node(i)->is_Proj() ||
108 b->get_node(i)->is_Phi() ) )
109 i++;
110
111 // Do not insert between a call and his Catch
112 if( b->get_node(i)->is_Catch() ) {
113 // Put the instruction at the top of the fall-thru block.
114 // Find the fall-thru projection
115 while( 1 ) {
// Scan the CatchProj projections following the Catch node; ++i advances past
// each one. The loop exit (finding the fall-through proj) is in the
// truncated portion below.
116 const CatchProjNode *cp = b->get_node(++i)->as_CatchProj();
646 break;
647 }
// NOTE(review): fragment of the Split pass — scanning a block's Phis for one
// already mapped (via the union-find LRG map) to the spilling live range
// lidxs.at(slidx). Surrounding loop header and n1/n3/phi declarations are
// outside this view.
648 // must be looking at a phi
649 if (_lrg_map.find_id(n1) == lidxs.at(slidx)) {
650 // found the necessary phi
651 needs_phi = false;
652 has_phi = true;
653 // initialize the Reaches entry for this LRG
654 Reachblock[slidx] = phi;
655 break;
656 } // end if found correct phi
657 } // end for all phi's
658
659 // If a phi is needed or exist, check for it
660 if( needs_phi || has_phi ) {
661 // add new phinode if one not already found
662 if( needs_phi ) {
663 // create a new phi node and insert it into the block
664 // type is taken from left over pointer to a predecessor
665 assert(n3,"No non-NULL reaching DEF for a Phi");
666 phi = new (C) PhiNode(b->head(), n3->bottom_type());
667 // initialize the Reaches entry for this LRG
668 Reachblock[slidx] = phi;
669
670 // add node to block & node_to_block mapping
// insidx/maxlrg are post-incremented so subsequent insertions and new live
// ranges stay in step with the node just added.
671 insert_proj(b, insidx++, phi, maxlrg++);
672 non_phi++;
673 // Reset new phi's mapping to be the spilling live range
674 _lrg_map.map(phi->_idx, lidx);
675 assert(_lrg_map.find_id(phi) == lidx, "Bad update on Union-Find mapping");
676 } // end if not found correct phi
677 // Here you have either found or created the Phi, so record it
678 assert(phi != NULL,"Must have a Phi Node here");
679 phis->push(phi);
680 // PhiNodes should either force the LRG UP or DOWN depending
681 // on its inputs and the register pressure in the Phi's block.
// NOTE(review): UP appears to mean "value lives in a register", DOWN "on the
// stack" — confirm against the allocator's header comments (not visible here).
682 UPblock[slidx] = true; // Assume new DEF is UP
683 // If entering a high-pressure area with no immediate use,
684 // assume Phi is DOWN
685 if( is_high_pressure( b, &lrgs(lidx), b->end_idx()) && !prompt_use(b,lidx) )
686 UPblock[slidx] = false;
// NOTE(review): fragment of the Split pass handling a USE whose def (dup) and
// use (uup) UP/DOWN states are compared; dmask/umask/def/n/inpidx come from
// the truncated surrounding loop.
1058 if( dup == uup ) {
1059 if( dmask.overlap(umask) ) {
1060 // Both are either up or down, and there is overlap, No Split
1061 n->set_req(inpidx, def);
1062 }
1063 else { // Both are either up or down, and there is no overlap
1064 if( dup ) { // If UP, reg->reg copy
1065 // COPY ACROSS HERE - NO DEF - NO CISC SPILL
1066 maxlrg = split_USE(MachSpillCopyNode::RegToReg, def,b,n,inpidx,maxlrg,false,false, splits,slidx);
// A zero return from split_USE signals failure (e.g. out of nodes) and is
// propagated to the caller as a bailout.
1067 // If it wasn't split bail
1068 if (!maxlrg) {
1069 return 0;
1070 }
1071 insidx++; // Reset iterator to skip USE side split
1072 }
1073 else { // DOWN, mem->mem copy
1074 // COPY UP & DOWN HERE - NO DEF - NO CISC SPILL
// A direct memory-to-memory copy is not expressible, so the value is routed
// through a register of the def's ideal register class: first a MemToReg
// copy, then a RegToMem split as if that copy were the DEF.
1075 // First Split-UP to move value into Register
1076 uint def_ideal = def->ideal_reg();
1077 const RegMask* tmp_rm = Matcher::idealreg2regmask[def_ideal];
1078 Node *spill = new (C) MachSpillCopyNode(MachSpillCopyNode::MemToReg, def, dmask, *tmp_rm);
1079 insert_proj( b, insidx, spill, maxlrg );
1080 // Then Split-DOWN as if previous Split was DEF
1081 maxlrg = split_USE(MachSpillCopyNode::RegToMem, spill,b,n,inpidx,maxlrg,false,false, splits,slidx);
1082 // If it wasn't split bail
1083 if (!maxlrg) {
1084 return 0;
1085 }
// Two nodes were inserted (the MemToReg spill and the split copy), so the
// iterator advances by two.
1086 insidx += 2; // Reset iterator to skip USE side splits
1087 }
1088 } // End else no overlap
1089 } // End if dup == uup
1090 // dup != uup, so check dup for direction of Split
1091 else {
1092 if( dup ) { // If UP, Split-DOWN and check Debug Info
1093 // If this node is already a SpillCopy, just patch the edge
1094 // except the case of spilling to stack.
1095 if( n->is_SpillCopy() ) {
// Exclude stack slots from the use mask so overlap below implies a
// register-to-register patch is possible.
1096 RegMask tmp_rm(umask);
1097 tmp_rm.SUBTRACT(Matcher::STACK_ONLY_mask);
1098 if( dmask.overlap(tmp_rm) ) {
// NOTE(review): fragment — splitting leftover stack-to-stack (mem-mem) copies
// by routing through a register; the enclosing per-block instruction loop is
// outside this view.
1212 // ********** Split Left Over Mem-Mem Moves **********
1213 // Check for mem-mem copies and split them now. Do not do this
1214 // to copies about to be spilled; they will be Split shortly.
1215 if (copyidx) {
1216 Node *use = n->in(copyidx);
1217 uint useidx = _lrg_map.find_id(use);
1218 if (useidx < _lrg_map.max_lrg_id() && // This is not a new split
1219 OptoReg::is_stack(deflrg.reg()) &&
1220 deflrg.reg() < LRG::SPILL_REG ) { // And DEF is from stack
1221 LRG &uselrg = lrgs(useidx);
1222 if( OptoReg::is_stack(uselrg.reg()) &&
1223 uselrg.reg() < LRG::SPILL_REG && // USE is from stack
1224 deflrg.reg() != uselrg.reg() ) { // Not trivially removed
1225 uint def_ideal_reg = n->bottom_type()->ideal_reg();
1226 const RegMask &def_rm = *Matcher::idealreg2regmask[def_ideal_reg];
1227 const RegMask &use_rm = n->in_RegMask(copyidx);
1228 if( def_rm.overlap(use_rm) && n->is_SpillCopy() ) { // Bug 4707800, 'n' may be a storeSSL
// Bail out (return 0) if node budget is exhausted before creating more nodes.
1229 if (C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) { // Check when generating nodes
1230 return 0;
1231 }
// Break the mem-mem copy: load the use into a register first, then let 'n'
// perform the register-to-stack half.
1232 Node *spill = new (C) MachSpillCopyNode(MachSpillCopyNode::MemToReg, use,use_rm,def_rm);
1233 n->set_req(copyidx,spill);
1234 n->as_MachSpillCopy()->set_in_RegMask(def_rm);
1235 // Put the spill just before the copy
1236 insert_proj( b, insidx++, spill, maxlrg++ );
1237 }
1238 }
1239 }
1240 }
1241 } // End For All Instructions in Block - Non-PHI Pass
1242
1243 // Check if each LRG is live out of this block so as not to propagate
1244 // beyond the last use of a LRG.
1245 for( slidx = 0; slidx < spill_cnt; slidx++ ) {
1246 uint defidx = lidxs.at(slidx);
1247 IndexSet *liveout = _live->live(b);
1248 if( !liveout->member(defidx) ) {
// NOTE(review): fragment ends inside this #ifdef ASSERT region; the matching
// #endif and the rest of the check are not visible here.
1249 #ifdef ASSERT
1250 // The index defidx is not live. Check the liveout array to ensure that
1251 // it contains no members which compress to defidx. Finding such an
1252 // instance may be a case to add liveout adjustment in compress_uf_map().
|
76
// NOTE(review): duplicate variant of the spill-copy-factory tail earlier in
// this file; the only code difference is plain 'new' instead of arena
// 'new (C)' on the return line — an updated allocation style.
77 int num_regs = RegMask::num_registers(ireg);
78 bool is_vect = RegMask::is_vector(ireg);
// Prefer the wide (ideal) mask for the copy's output when it overlaps the
// desired mask and the value is a single register, a vector, or an aligned
// scalar pair.
79 if( w_mask->overlap( *o_mask ) && // Overlap AND
80 ((num_regs == 1) // Single use or aligned
81 || is_vect // or vector
// NOTE(review): unparenthesized '!is_vect && ...' inside an '||' chain —
// intended precedence, but typically draws -Wparentheses.
82 || !is_vect && o_mask->is_aligned_pairs()) ) {
83 assert(!is_vect || o_mask->is_aligned_sets(num_regs), "vectors are aligned");
84 // Don't come here for mis-aligned doubles
85 w_o_mask = w_mask;
86 } else { // wide ideal mask does not overlap with o_mask
87 // Mis-aligned doubles come here and XMM->FPR moves on x86.
88 w_o_mask = o_mask; // Must target desired registers
89 // Does the ideal-reg-mask overlap with o_mask? I.e., can I use
90 // a reg-reg move or do I need a trip across register classes
91 // (and thus through memory)?
92 if( !C->matcher()->idealreg2regmask[ireg]->overlap( *o_mask) && o_mask->is_UP() )
93 // Here we assume a trip through memory is required.
94 w_i_mask = &C->FIRST_STACK_mask();
95 }
96 return new MachSpillCopyNode(spill_type, def, *w_i_mask, *w_o_mask );
97 }
98
99 //------------------------------insert_proj------------------------------------
100 // Insert the spill at chosen location. Skip over any intervening Proj's or
101 // Phis. Skip over a CatchNode and projs, inserting in the fall-through block
102 // instead. Update high-pressure indices. Create a new live range.
// NOTE(review): duplicate variant of insert_proj earlier in this file; body
// is likewise truncated (numbering jumps from 116 to 646 below).
103 void PhaseChaitin::insert_proj( Block *b, uint i, Node *spill, uint maxlrg ) {
104 // Skip intervening ProjNodes. Do not insert between a ProjNode and
105 // its definer.
106 while( i < b->number_of_nodes() &&
107 (b->get_node(i)->is_Proj() ||
108 b->get_node(i)->is_Phi() ) )
109 i++;
110
111 // Do not insert between a call and his Catch
112 if( b->get_node(i)->is_Catch() ) {
113 // Put the instruction at the top of the fall-thru block.
114 // Find the fall-thru projection
115 while( 1 ) {
// Scan the CatchProj projections after the Catch node; loop exit is in the
// truncated portion below.
116 const CatchProjNode *cp = b->get_node(++i)->as_CatchProj();
646 break;
647 }
// NOTE(review): duplicate variant of the phi search/creation fragment earlier
// in this file; only difference is plain 'new' (no arena '(C)') for PhiNode.
648 // must be looking at a phi
649 if (_lrg_map.find_id(n1) == lidxs.at(slidx)) {
650 // found the necessary phi
651 needs_phi = false;
652 has_phi = true;
653 // initialize the Reaches entry for this LRG
654 Reachblock[slidx] = phi;
655 break;
656 } // end if found correct phi
657 } // end for all phi's
658
659 // If a phi is needed or exist, check for it
660 if( needs_phi || has_phi ) {
661 // add new phinode if one not already found
662 if( needs_phi ) {
663 // create a new phi node and insert it into the block
664 // type is taken from left over pointer to a predecessor
665 assert(n3,"No non-NULL reaching DEF for a Phi");
666 phi = new PhiNode(b->head(), n3->bottom_type());
667 // initialize the Reaches entry for this LRG
668 Reachblock[slidx] = phi;
669
670 // add node to block & node_to_block mapping
// insidx/maxlrg post-increment keeps later insertions and live-range ids in
// step with the node just added.
671 insert_proj(b, insidx++, phi, maxlrg++);
672 non_phi++;
673 // Reset new phi's mapping to be the spilling live range
674 _lrg_map.map(phi->_idx, lidx);
675 assert(_lrg_map.find_id(phi) == lidx, "Bad update on Union-Find mapping");
676 } // end if not found correct phi
677 // Here you have either found or created the Phi, so record it
678 assert(phi != NULL,"Must have a Phi Node here");
679 phis->push(phi);
680 // PhiNodes should either force the LRG UP or DOWN depending
681 // on its inputs and the register pressure in the Phi's block.
682 UPblock[slidx] = true; // Assume new DEF is UP
683 // If entering a high-pressure area with no immediate use,
684 // assume Phi is DOWN
685 if( is_high_pressure( b, &lrgs(lidx), b->end_idx()) && !prompt_use(b,lidx) )
686 UPblock[slidx] = false;
// NOTE(review): duplicate variant of the dup/uup USE-handling fragment earlier
// in this file; only difference is plain 'new' for MachSpillCopyNode.
1058 if( dup == uup ) {
1059 if( dmask.overlap(umask) ) {
1060 // Both are either up or down, and there is overlap, No Split
1061 n->set_req(inpidx, def);
1062 }
1063 else { // Both are either up or down, and there is no overlap
1064 if( dup ) { // If UP, reg->reg copy
1065 // COPY ACROSS HERE - NO DEF - NO CISC SPILL
1066 maxlrg = split_USE(MachSpillCopyNode::RegToReg, def,b,n,inpidx,maxlrg,false,false, splits,slidx);
1067 // If it wasn't split bail
1068 if (!maxlrg) {
1069 return 0;
1070 }
1071 insidx++; // Reset iterator to skip USE side split
1072 }
1073 else { // DOWN, mem->mem copy
1074 // COPY UP & DOWN HERE - NO DEF - NO CISC SPILL
// Route the value through a register of the def's ideal class: MemToReg
// first, then RegToMem as if that copy were the DEF.
1075 // First Split-UP to move value into Register
1076 uint def_ideal = def->ideal_reg();
1077 const RegMask* tmp_rm = Matcher::idealreg2regmask[def_ideal];
1078 Node *spill = new MachSpillCopyNode(MachSpillCopyNode::MemToReg, def, dmask, *tmp_rm);
1079 insert_proj( b, insidx, spill, maxlrg );
1080 // Then Split-DOWN as if previous Split was DEF
1081 maxlrg = split_USE(MachSpillCopyNode::RegToMem, spill,b,n,inpidx,maxlrg,false,false, splits,slidx);
1082 // If it wasn't split bail
1083 if (!maxlrg) {
1084 return 0;
1085 }
1086 insidx += 2; // Reset iterator to skip USE side splits
1087 }
1088 } // End else no overlap
1089 } // End if dup == uup
1090 // dup != uup, so check dup for direction of Split
1091 else {
1092 if( dup ) { // If UP, Split-DOWN and check Debug Info
1093 // If this node is already a SpillCopy, just patch the edge
1094 // except the case of spilling to stack.
1095 if( n->is_SpillCopy() ) {
// Exclude stack slots so the overlap test below implies a reg-reg patch.
1096 RegMask tmp_rm(umask);
1097 tmp_rm.SUBTRACT(Matcher::STACK_ONLY_mask);
1098 if( dmask.overlap(tmp_rm) ) {
// NOTE(review): duplicate variant of the mem-mem split fragment earlier in
// this file; only difference is plain 'new' for MachSpillCopyNode.
1212 // ********** Split Left Over Mem-Mem Moves **********
1213 // Check for mem-mem copies and split them now. Do not do this
1214 // to copies about to be spilled; they will be Split shortly.
1215 if (copyidx) {
1216 Node *use = n->in(copyidx);
1217 uint useidx = _lrg_map.find_id(use);
1218 if (useidx < _lrg_map.max_lrg_id() && // This is not a new split
1219 OptoReg::is_stack(deflrg.reg()) &&
1220 deflrg.reg() < LRG::SPILL_REG ) { // And DEF is from stack
1221 LRG &uselrg = lrgs(useidx);
1222 if( OptoReg::is_stack(uselrg.reg()) &&
1223 uselrg.reg() < LRG::SPILL_REG && // USE is from stack
1224 deflrg.reg() != uselrg.reg() ) { // Not trivially removed
1225 uint def_ideal_reg = n->bottom_type()->ideal_reg();
1226 const RegMask &def_rm = *Matcher::idealreg2regmask[def_ideal_reg];
1227 const RegMask &use_rm = n->in_RegMask(copyidx);
1228 if( def_rm.overlap(use_rm) && n->is_SpillCopy() ) { // Bug 4707800, 'n' may be a storeSSL
1229 if (C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) { // Check when generating nodes
1230 return 0;
1231 }
// Break the mem-mem copy: load the use into a register first; 'n' then does
// the register-to-stack half.
1232 Node *spill = new MachSpillCopyNode(MachSpillCopyNode::MemToReg, use,use_rm,def_rm);
1233 n->set_req(copyidx,spill);
1234 n->as_MachSpillCopy()->set_in_RegMask(def_rm);
1235 // Put the spill just before the copy
1236 insert_proj( b, insidx++, spill, maxlrg++ );
1237 }
1238 }
1239 }
1240 }
1241 } // End For All Instructions in Block - Non-PHI Pass
1242
1243 // Check if each LRG is live out of this block so as not to propagate
1244 // beyond the last use of a LRG.
1245 for( slidx = 0; slidx < spill_cnt; slidx++ ) {
1246 uint defidx = lidxs.at(slidx);
1247 IndexSet *liveout = _live->live(b);
1248 if( !liveout->member(defidx) ) {
// NOTE(review): fragment ends inside this #ifdef ASSERT region; the matching
// #endif is not visible here.
1249 #ifdef ASSERT
1250 // The index defidx is not live. Check the liveout array to ensure that
1251 // it contains no members which compress to defidx. Finding such an
1252 // instance may be a case to add liveout adjustment in compress_uf_map().