src/share/vm/opto/coalesce.cpp
7069452 Sdiff

Old:

 122   assert( src < dst, "always union smaller" );
 123   _uf_map.map(dst,src);
 124 }
 125 
 126 //------------------------------new_lrg----------------------------------------
 127 void PhaseChaitin::new_lrg( const Node *x, uint lrg ) {
 128   // Make the Node->LRG mapping
 129   _names.extend(x->_idx,lrg);
 130   // Make the Union-Find mapping an identity function
 131   _uf_map.extend(lrg,lrg);
 132 }
 133 
 134 //------------------------------clone_projs------------------------------------
 135   // After cloning some rematerialized instruction, clone any MachProjs that
 136 // follow it.  Example: Intel zero is XOR, kills flags.  Sparc FP constants
 137 // use G3 as an address temp.
 138 int PhaseChaitin::clone_projs( Block *b, uint idx, Node *con, Node *copy, uint &maxlrg ) {
 139   Block *bcon = _cfg._bbs[con->_idx];
 140   uint cindex = bcon->find_node(con);
 141   Node *con_next = bcon->_nodes[cindex+1];
 142   if( con_next->in(0) != con || con_next->Opcode() != Op_MachProj )
 143     return false;               // No MachProjs follow
 144 
 145   // Copy kills after the cloned constant
 146   Node *kills = con_next->clone();
 147   kills->set_req( 0, copy );
 148   b->_nodes.insert( idx, kills );
 149   _cfg._bbs.map( kills->_idx, b );
 150   new_lrg( kills, maxlrg++ );
 151   return true;
 152 }
 153 
 154 //------------------------------compact----------------------------------------
 155 // Renumber the live ranges to compact them.  Makes the IFG smaller.
 156 void PhaseChaitin::compact() {
 157   // Currently the _uf_map contains a series of short chains which are headed
 158   // by a self-cycle.  All the chains run from big numbers to little numbers.
 159   // The Find() call chases the chains & shortens them for the next Find call.
 160   // We are going to change this structure slightly.  Numbers above a moving
 161   // wave 'i' are unchanged.  Numbers below 'j' point directly to their
 162   // compacted live range with no further chaining.  There are no chains or


 295 // If I insert them in the wrong order then L128 will get clobbered before it
 296 // can get used by the second copy.  This cannot happen in the SSA model;
 297 // direct use-def chains get me the right value.  It DOES happen in the named
 298 // model so I have to handle the reordering of copies.
 299 //
 300 // In general, I need to topo-sort the placed copies to avoid conflicts.
 301   // It's possible to have a closed cycle of copies (e.g., recirculating the same
 302 // values around a loop).  In this case I need a temp to break the cycle.
 303 void PhaseAggressiveCoalesce::insert_copy_with_overlap( Block *b, Node *copy, uint dst_name, uint src_name ) {
 304 
 305   // Scan backwards for the location of the last use of the dst_name.
 306   // I am about to clobber the dst_name, so the copy must be inserted
 307   // after the last use.  Last use is really first-use on a backwards scan.
 308   uint i = b->end_idx()-1;
 309   while( 1 ) {
 310     Node *n = b->_nodes[i];
 311     // Check for end of virtual copies; this is also the end of the
 312     // parallel renaming effort.
 313     if( n->_idx < _unique ) break;
 314     uint idx = n->is_Copy();
 315     assert( idx || n->is_Con() || n->Opcode() == Op_MachProj, "Only copies during parallel renaming" );
 316     if( idx && _phc.Find(n->in(idx)) == dst_name ) break;
 317     i--;
 318   }
 319   uint last_use_idx = i;
 320 
 321   // Also search for any kill of src_name that exits the block.
 322   // Since the copy uses src_name, I have to come before any kill.
 323   uint kill_src_idx = b->end_idx();
 324   // There can be only 1 kill that exits any block and that is
 325   // the last kill.  Thus it is the first kill on a backwards scan.
 326   i = b->end_idx()-1;
 327   while( 1 ) {
 328     Node *n = b->_nodes[i];
 329     // Check for end of virtual copies; this is also the end of the
 330     // parallel renaming effort.
 331     if( n->_idx < _unique ) break;
 332     assert( n->is_Copy() || n->is_Con() || n->Opcode() == Op_MachProj, "Only copies during parallel renaming" );
 333     if( _phc.Find(n) == src_name ) {
 334       kill_src_idx = i;
 335       break;
 336     }
 337     i--;
 338   }
 339   // Need a temp?  Last use of dst comes after the kill of src?
 340   if( last_use_idx >= kill_src_idx ) {
 341     // Need to break a cycle with a temp
 342     uint idx = copy->is_Copy();
 343     Node *tmp = copy->clone();
 344     _phc.new_lrg(tmp,_phc._maxlrg++);
 345     // Insert new temp between copy and source
 346     tmp ->set_req(idx,copy->in(idx));
 347     copy->set_req(idx,tmp);
 348     // Save source in temp early, before source is killed
 349     b->_nodes.insert(kill_src_idx,tmp);
 350     _phc._cfg._bbs.map( tmp->_idx, b );
 351     last_use_idx++;
 352   }

New:

 122   assert( src < dst, "always union smaller" );
 123   _uf_map.map(dst,src);
 124 }
 125 
 126 //------------------------------new_lrg----------------------------------------
 127 void PhaseChaitin::new_lrg( const Node *x, uint lrg ) {
 128   // Make the Node->LRG mapping
 129   _names.extend(x->_idx,lrg);
 130   // Make the Union-Find mapping an identity function
 131   _uf_map.extend(lrg,lrg);
 132 }
 133 
 134 //------------------------------clone_projs------------------------------------
 135   // After cloning some rematerialized instruction, clone any MachProjs that
 136 // follow it.  Example: Intel zero is XOR, kills flags.  Sparc FP constants
 137 // use G3 as an address temp.
 138 int PhaseChaitin::clone_projs( Block *b, uint idx, Node *con, Node *copy, uint &maxlrg ) {
 139   Block *bcon = _cfg._bbs[con->_idx];
 140   uint cindex = bcon->find_node(con);
 141   Node *con_next = bcon->_nodes[cindex+1];
 142   if( con_next->in(0) != con || !con_next->is_MachProj() )
 143     return false;               // No MachProjs follow
 144 
 145   // Copy kills after the cloned constant
 146   Node *kills = con_next->clone();
 147   kills->set_req( 0, copy );
 148   b->_nodes.insert( idx, kills );
 149   _cfg._bbs.map( kills->_idx, b );
 150   new_lrg( kills, maxlrg++ );
 151   return true;
 152 }
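
For readers outside HotSpot, here is a minimal standalone sketch of the clone_projs idea, using toy stand-ins for Node and Block (everything below is illustrative, not HotSpot types or API): if the node immediately after the original instruction is a projection hanging off it, the clone must get its own copy of that projection, re-wired to point at the clone, or the rematerialized instruction silently loses its kill bookkeeping.

#include <cstdio>
#include <vector>

struct Node {
  const char* name;
  Node*       ctrl;     // stand-in for in(0)
  bool        is_proj;  // stand-in for is_MachProj()
  Node* clone() const { return new Node(*this); }
};

// If a projection immediately follows the original node, clone it and hang
// the clone off 'copy'; mirrors the con_next check in clone_projs above.
static bool clone_trailing_proj(std::vector<Node*>& block, size_t orig_idx,
                                Node* copy, size_t insert_at) {
  Node* next = block[orig_idx + 1];
  if (next->ctrl != block[orig_idx] || !next->is_proj)
    return false;                  // no MachProj-like kill follows
  Node* kill = next->clone();
  kill->ctrl = copy;               // re-wire the cloned kill to the copy
  block.insert(block.begin() + insert_at, kill);
  return true;
}

int main() {
  Node con  = {"xor-zero",   nullptr, false};
  Node kill = {"flags-kill", &con,    true};   // kill trailing the XOR
  Node end  = {"end",        nullptr, false};
  std::vector<Node*> block = {&con, &kill, &end};

  Node* copy = con.clone();                    // rematerialized constant
  block.insert(block.begin() + 2, copy);       // place the clone later on
  bool cloned = clone_trailing_proj(block, 0, copy, 3);
  std::printf("trailing kill cloned: %s\n", cloned ? "yes" : "no");
  for (Node* n : block) std::printf("  %s\n", n->name);
}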
 153 
 154 //------------------------------compact----------------------------------------
 155 // Renumber the live ranges to compact them.  Makes the IFG smaller.
 156 void PhaseChaitin::compact() {
 157   // Currently the _uf_map contains a series of short chains which are headed
 158   // by a self-cycle.  All the chains run from big numbers to little numbers.
 159   // The Find() call chases the chains & shortens them for the next Find call.
 160   // We are going to change this structure slightly.  Numbers above a moving
 161   // wave 'i' are unchanged.  Numbers below 'j' point directly to their
 162   // compacted live range with no further chaining.  There are no chains or
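
The renumbering this comment describes can be shown with a standalone sketch (a plain vector standing in for _uf_map, not the HotSpot type). Because "always union smaller" holds, every chain runs downward, so a single left-to-right pass can hand out dense numbers and leave each entry pointing directly at its compacted live range, with no chains left to chase:

#include <cstdio>
#include <vector>

int main() {
  // uf[i] == i marks a live-range head; otherwise uf[i] chains downward.
  std::vector<unsigned> uf = {0, 1, 1, 2, 4, 4, 3};
  std::vector<unsigned> compacted(uf.size());
  unsigned j = 0;                      // next dense number to hand out
  for (unsigned i = 0; i < uf.size(); i++) {
    if (uf[i] == i) {
      compacted[i] = j++;              // head: gets a fresh compact number
    } else {
      // uf[i] < i always holds ("always union smaller"), so the target was
      // already compacted and one hop suffices -- no further chaining.
      compacted[i] = compacted[uf[i]];
    }
  }
  for (unsigned i = 0; i < uf.size(); i++)
    std::printf("lrg %u -> %u\n", i, compacted[i]);
}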


 295 // If I insert them in the wrong order then L128 will get clobbered before it
 296 // can get used by the second copy.  This cannot happen in the SSA model;
 297 // direct use-def chains get me the right value.  It DOES happen in the named
 298 // model so I have to handle the reordering of copies.
 299 //
 300 // In general, I need to topo-sort the placed copies to avoid conflicts.
 301   // It's possible to have a closed cycle of copies (e.g., recirculating the same
 302 // values around a loop).  In this case I need a temp to break the cycle.
 303 void PhaseAggressiveCoalesce::insert_copy_with_overlap( Block *b, Node *copy, uint dst_name, uint src_name ) {
 304 
 305   // Scan backwards for the location of the last use of the dst_name.
 306   // I am about to clobber the dst_name, so the copy must be inserted
 307   // after the last use.  Last use is really first-use on a backwards scan.
 308   uint i = b->end_idx()-1;
 309   while( 1 ) {
 310     Node *n = b->_nodes[i];
 311     // Check for end of virtual copies; this is also the end of the
 312     // parallel renaming effort.
 313     if( n->_idx < _unique ) break;
 314     uint idx = n->is_Copy();
 315     assert( idx || n->is_Con() || n->is_MachProj(), "Only copies during parallel renaming" );
 316     if( idx && _phc.Find(n->in(idx)) == dst_name ) break;
 317     i--;
 318   }
 319   uint last_use_idx = i;
 320 
 321   // Also search for any kill of src_name that exits the block.
 322   // Since the copy uses src_name, I have to come before any kill.
 323   uint kill_src_idx = b->end_idx();
 324   // There can be only 1 kill that exits any block and that is
 325   // the last kill.  Thus it is the first kill on a backwards scan.
 326   i = b->end_idx()-1;
 327   while( 1 ) {
 328     Node *n = b->_nodes[i];
 329     // Check for end of virtual copies; this is also the end of the
 330     // parallel renaming effort.
 331     if( n->_idx < _unique ) break;
 332     assert( n->is_Copy() || n->is_Con() || n->is_MachProj(), "Only copies during parallel renaming" );
 333     if( _phc.Find(n) == src_name ) {
 334       kill_src_idx = i;
 335       break;
 336     }
 337     i--;
 338   }
 339   // Need a temp?  Last use of dst comes after the kill of src?
 340   if( last_use_idx >= kill_src_idx ) {
 341     // Need to break a cycle with a temp
 342     uint idx = copy->is_Copy();
 343     Node *tmp = copy->clone();
 344     _phc.new_lrg(tmp,_phc._maxlrg++);
 345     // Insert new temp between copy and source
 346     tmp ->set_req(idx,copy->in(idx));
 347     copy->set_req(idx,tmp);
 348     // Save source in temp early, before source is killed
 349     b->_nodes.insert(kill_src_idx,tmp);
 350     _phc._cfg._bbs.map( tmp->_idx, b );
 351     last_use_idx++;
 352   }
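
The cycle that forces a temp is easiest to see with two live ranges that must swap values. A minimal sketch, with plain ints standing in for live ranges (nothing here is HotSpot API):

#include <cstdio>

int main() {
  // Naive sequential placement of the two copies clobbers a source:
  int a = 1, b = 2;      // a and b play the roles of L128 and L129
  a = b;                 // L128 = L129
  b = a;                 // wants the ORIGINAL L128, but reads the new value
  std::printf("without temp: a=%d b=%d (swap lost)\n", a, b);

  // With a temp, as insert_copy_with_overlap arranges: the source that is
  // about to be killed is saved first, and the later copy reads the temp.
  int x = 1, y = 2;
  int tmp = x;           // temp inserted at kill_src_idx, before x is killed
  x = y;                 // L128 = L129
  y = tmp;               // L129 = original L128, via the temp
  std::printf("with temp:    x=%d y=%d (correct swap)\n", x, y);
}

Saving into tmp at kill_src_idx is exactly the "save source in temp early, before source is killed" step in the code above; it breaks the copy cycle so the remaining copies can be placed in any order.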

