JDK-8022284 Cdiff src/share/vm/opto/chaitin.cpp

*** 293,314 ****
    _lrg_map.uf_extend(lrg, lrg);
  }
  
  bool PhaseChaitin::clone_projs_shared(Block *b, uint idx, Node *con, Node *copy, uint max_lrg_id) {
!   Block *bcon = _cfg._bbs[con->_idx];
    uint cindex = bcon->find_node(con);
    Node *con_next = bcon->_nodes[cindex+1];
    if (con_next->in(0) != con || !con_next->is_MachProj()) {
      return false; // No MachProj's follow
    }
  
    // Copy kills after the cloned constant
    Node *kills = con_next->clone();
    kills->set_req(0, copy);
    b->_nodes.insert(idx, kills);
!   _cfg._bbs.map(kills->_idx, b);
    new_lrg(kills, max_lrg_id);
    return true;
  }
  
  //------------------------------compact----------------------------------------
--- 293,314 ----
    _lrg_map.uf_extend(lrg, lrg);
  }
  
  bool PhaseChaitin::clone_projs_shared(Block *b, uint idx, Node *con, Node *copy, uint max_lrg_id) {
!   Block* bcon = _cfg.get_block_for_node(con);
    uint cindex = bcon->find_node(con);
    Node *con_next = bcon->_nodes[cindex+1];
    if (con_next->in(0) != con || !con_next->is_MachProj()) {
      return false; // No MachProj's follow
    }
  
    // Copy kills after the cloned constant
    Node *kills = con_next->clone();
    kills->set_req(0, copy);
    b->_nodes.insert(idx, kills);
!   _cfg.map_node_to_block(kills, b);
    new_lrg(kills, max_lrg_id);
    return true;
  }
  
  //------------------------------compact----------------------------------------
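
The same mechanical substitution recurs in every hunk below: direct indexing of the exposed _bbs Block_Array is replaced by two PhaseCFG accessors. A minimal sketch of what those accessors presumably wrap, assuming PhaseCFG keeps the node-to-block Block_Array as a private _bbs field (the accessor names and call shapes are taken from the hunks; the bodies are illustrative):

    // Sketch, not the verbatim implementation: assumes a private Block_Array _bbs.
    Block* PhaseCFG::get_block_for_node(const Node* node) const {
      return _bbs[node->_idx];        // block currently containing this node
    }

    void PhaseCFG::map_node_to_block(Node* node, Block* block) {
      _bbs.map(node->_idx, block);    // record or update the node's block
    }

Funneling every lookup and update through these two methods is what lets _bbs become private to PhaseCFG without revisiting call sites again later.
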
*** 960,971 ****
        // Limit result register mask to acceptable registers.
        // Do not limit registers from uncommon uses before
        // AggressiveCoalesce. This effectively pre-virtual-splits
        // around uncommon uses of common defs.
        const RegMask &rm = n->in_RegMask(k);
!       if( !after_aggressive &&
!           _cfg._bbs[n->in(k)->_idx]->_freq > 1000*b->_freq ) {
          // Since we are BEFORE aggressive coalesce, leave the register
          // mask untrimmed by the call. This encourages more coalescing.
          // Later, AFTER aggressive, this live range will have to spill
          // but the spiller handles slow-path calls very nicely.
        } else {
--- 960,970 ----
        // Limit result register mask to acceptable registers.
        // Do not limit registers from uncommon uses before
        // AggressiveCoalesce. This effectively pre-virtual-splits
        // around uncommon uses of common defs.
        const RegMask &rm = n->in_RegMask(k);
!       if (!after_aggressive && _cfg.get_block_for_node(n->in(k))->_freq > 1000 * b->_freq) {
          // Since we are BEFORE aggressive coalesce, leave the register
          // mask untrimmed by the call. This encourages more coalescing.
          // Later, AFTER aggressive, this live range will have to spill
          // but the spiller handles slow-path calls very nicely.
        } else {
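
A quick numeric reading of the frequency test in this hunk (the values are made up for illustration):

    // Suppose the def n->in(k) sits in a hot loop block and the use block b is cold:
    //   _cfg.get_block_for_node(n->in(k))->_freq == 5000.0
    //   b->_freq                                 ==    1.0
    // Then 5000.0 > 1000 * 1.0 holds: before aggressive coalescing the register
    // mask stays untrimmed, and a later spill here is left to the spiller's
    // slow-path handling, as the surrounding comments explain.
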
*** 1707,1726 ****
      if (base->in(0) == NULL) {
        // Initialize it once and make it shared:
        // set control to _root and place it into Start block
        // (where top() node is placed).
        base->init_req(0, _cfg._root);
!       Block *startb = _cfg._bbs[C->top()->_idx];
        startb->_nodes.insert(startb->find_node(C->top()), base );
!       _cfg._bbs.map( base->_idx, startb );
        assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet");
      }
      if (_lrg_map.live_range_id(base) == 0) {
        new_lrg(base, maxlrg++);
      }
!     assert(base->in(0) == _cfg._root &&
!            _cfg._bbs[base->_idx] == _cfg._bbs[C->top()->_idx], "base NULL should be shared");
      derived_base_map[derived->_idx] = base;
      return base;
    }
  
    // Check for AddP-related opcodes
--- 1706,1724 ----
      if (base->in(0) == NULL) {
        // Initialize it once and make it shared:
        // set control to _root and place it into Start block
        // (where top() node is placed).
        base->init_req(0, _cfg._root);
!       Block *startb = _cfg.get_block_for_node(C->top());
        startb->_nodes.insert(startb->find_node(C->top()), base );
!       _cfg.map_node_to_block(base, startb);
        assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet");
      }
      if (_lrg_map.live_range_id(base) == 0) {
        new_lrg(base, maxlrg++);
      }
!     assert(base->in(0) == _cfg._root && _cfg.get_block_for_node(base) == _cfg.get_block_for_node(C->top()), "base NULL should be shared");
      derived_base_map[derived->_idx] = base;
      return base;
    }
  
    // Check for AddP-related opcodes
*** 1752,1767 ****
      t = t->meet(base->in(i)->bottom_type());
    }
    base->as_Phi()->set_type(t);
  
    // Search the current block for an existing base-Phi
!   Block *b = _cfg._bbs[derived->_idx];
    for( i = 1; i <= b->end_idx(); i++ ) {// Search for matching Phi
      Node *phi = b->_nodes[i];
      if( !phi->is_Phi() ) { // Found end of Phis with no match?
        b->_nodes.insert( i, base ); // Must insert created Phi here as base
!       _cfg._bbs.map( base->_idx, b );
        new_lrg(base,maxlrg++);
        break;
      }
      // See if Phi matches.
      uint j;
--- 1750,1765 ----
      t = t->meet(base->in(i)->bottom_type());
    }
    base->as_Phi()->set_type(t);
  
    // Search the current block for an existing base-Phi
!   Block *b = _cfg.get_block_for_node(derived);
    for( i = 1; i <= b->end_idx(); i++ ) {// Search for matching Phi
      Node *phi = b->_nodes[i];
      if( !phi->is_Phi() ) { // Found end of Phis with no match?
        b->_nodes.insert( i, base ); // Must insert created Phi here as base
!       _cfg.map_node_to_block(base, b);
        new_lrg(base,maxlrg++);
        break;
      }
      // See if Phi matches.
      uint j;
*** 1813,1824 ****
        // one after. Instead we split the input to the compare just after the
        // phi.
        if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CmpI ) {
          Node *phi = n->in(1);
          if( phi->is_Phi() && phi->as_Phi()->region()->is_Loop() ) {
!           Block *phi_block = _cfg._bbs[phi->_idx];
!           if( _cfg._bbs[phi_block->pred(2)->_idx] == b ) {
              const RegMask *mask = C->matcher()->idealreg2spillmask[Op_RegI];
              Node *spill = new (C) MachSpillCopyNode( phi, *mask, *mask );
              insert_proj( phi_block, 1, spill, maxlrg++ );
              n->set_req(1,spill);
              must_recompute_live = true;
--- 1811,1822 ----
        // one after. Instead we split the input to the compare just after the
        // phi.
        if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CmpI ) {
          Node *phi = n->in(1);
          if( phi->is_Phi() && phi->as_Phi()->region()->is_Loop() ) {
!           Block *phi_block = _cfg.get_block_for_node(phi);
!           if (_cfg.get_block_for_node(phi_block->pred(2)) == b) {
              const RegMask *mask = C->matcher()->idealreg2spillmask[Op_RegI];
              Node *spill = new (C) MachSpillCopyNode( phi, *mask, *mask );
              insert_proj( phi_block, 1, spill, maxlrg++ );
              n->set_req(1,spill);
              must_recompute_live = true;
*** 1868,1878 ****
          // reaching def's. So if I find the base's live range then
          // I know the base's def reaches here.
          if ((_lrg_map.live_range_id(base) >= _lrg_map.max_lrg_id() || // (Brand new base (hence not live) or
               !liveout.member(_lrg_map.live_range_id(base))) &&        // not live) AND
               (_lrg_map.live_range_id(base) > 0) &&                    // not a constant
!              _cfg._bbs[base->_idx] != b) {                            // base not def'd in blk)
            // Base pointer is not currently live. Since I stretched
            // the base pointer to here and it crosses basic-block
            // boundaries, the global live info is now incorrect.
            // Recompute live.
            must_recompute_live = true;
--- 1866,1876 ----
          // reaching def's. So if I find the base's live range then
          // I know the base's def reaches here.
          if ((_lrg_map.live_range_id(base) >= _lrg_map.max_lrg_id() || // (Brand new base (hence not live) or
               !liveout.member(_lrg_map.live_range_id(base))) &&        // not live) AND
               (_lrg_map.live_range_id(base) > 0) &&                    // not a constant
!              _cfg.get_block_for_node(base) != b) {                    // base not def'd in blk)
            // Base pointer is not currently live. Since I stretched
            // the base pointer to here and it crosses basic-block
            // boundaries, the global live info is now incorrect.
            // Recompute live.
            must_recompute_live = true;
*** 1991,2002 ****
      tty->print(" Spill_2");
    }
    tty->print("\n");
  }
  
! void PhaseChaitin::dump( const Block * b ) const {
!   b->dump_head( &_cfg._bbs );
  
    // For all instructions
    for( uint j = 0; j < b->_nodes.size(); j++ )
      dump(b->_nodes[j]);
    // Print live-out info at end of block
--- 1989,2000 ----
      tty->print(" Spill_2");
    }
    tty->print("\n");
  }
  
! void PhaseChaitin::dump(const Block *b) const {
!   b->dump_head(&_cfg);
  
    // For all instructions
    for( uint j = 0; j < b->_nodes.size(); j++ )
      dump(b->_nodes[j]);
    // Print live-out info at end of block
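
This dump hunk (and the two that follow) also narrows the argument of Block::dump_head from the raw node-to-block array to the owning PhaseCFG. A sketch of the callee side under the new signature; only the parameter type is attested by the diff, the body here is illustrative:

    // Illustrative body: resolves predecessor nodes through the CFG accessor
    // instead of indexing a Block_Array directly.
    void Block::dump_head(const PhaseCFG* cfg) const {
      tty->print("B%d: ", _pre_order);              // this block's id
      for (uint i = 1; i < num_preds(); i++) {      // name each predecessor block
        if (cfg != NULL) {
          Block* pb = cfg->get_block_for_node(pred(i));
          tty->print(" B%d", pb->_pre_order);
        }
      }
      tty->cr();
    }
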
*** 2297,2307 ****
      for( uint j = 0; j < b->_nodes.size(); j++ ) {
        Node *n = b->_nodes[j];
        if (_lrg_map.find_const(n) == lidx) {
          if (!dump_once++) {
            tty->cr();
!           b->dump_head( &_cfg._bbs );
          }
          dump(n);
          continue;
        }
        if (!defs_only) {
--- 2295,2305 ----
      for( uint j = 0; j < b->_nodes.size(); j++ ) {
        Node *n = b->_nodes[j];
        if (_lrg_map.find_const(n) == lidx) {
          if (!dump_once++) {
            tty->cr();
!           b->dump_head(&_cfg);
          }
          dump(n);
          continue;
        }
        if (!defs_only) {
*** 2312,2322 ****
            continue; // be robust in the dumper
          }
          if (_lrg_map.find_const(m) == lidx) {
            if (!dump_once++) {
              tty->cr();
!             b->dump_head(&_cfg._bbs);
            }
            dump(n);
          }
        }
      }
--- 2310,2320 ----
            continue; // be robust in the dumper
          }
          if (_lrg_map.find_const(m) == lidx) {
            if (!dump_once++) {
              tty->cr();
!             b->dump_head(&_cfg);
            }
            dump(n);
          }
        }
      }