/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/block.hpp"
#include "opto/cfgnode.hpp"
#include "opto/chaitin.hpp"
#include "opto/coalesce.hpp"
#include "opto/connode.hpp"
#include "opto/indexSet.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/regmask.hpp"

//=============================================================================
//------------------------------Dump-------------------------------------------
#ifndef PRODUCT
void PhaseCoalesce::dump(Node *n) const {
  // Being a const function means I cannot use 'Find'
  uint r = _phc._lrg_map.find(n);
  tty->print("L%d/N%d ", r, n->_idx);
}

//------------------------------dump-------------------------------------------
void PhaseCoalesce::dump() const {
  // I know I have a block layout now, so I can print blocks in a loop
  for( uint i=0; i<_phc._cfg._num_blocks; i++ ) {
    uint j;
    Block *b = _phc._cfg._blocks[i];
    // Print a nice block header
    tty->print("B%d: ", b->_pre_order);
    for( j=1; j<b->num_preds(); j++ )
      tty->print("B%d ", _phc._cfg._bbs[b->pred(j)->_idx]->_pre_order);
    tty->print("-> ");
    for( j=0; j<b->_num_succs; j++ )
      tty->print("B%d ", b->_succs[j]->_pre_order);
    tty->print(" IDom: B%d/#%d\n", b->_idom ? b->_idom->_pre_order : 0, b->_dom_depth);
    uint cnt = b->_nodes.size();
    for( j=0; j<cnt; j++ ) {
      Node *n = b->_nodes[j];
      dump( n );
      tty->print("\t%s\t", n->Name());

      // Dump the inputs
      uint k;                       // Exit value of loop
      for( k=0; k<n->req(); k++ )   // For all required inputs
        if( n->in(k) ) dump( n->in(k) );
        else tty->print("_ ");
      int any_prec = 0;
      for( ; k<n->len(); k++ )      // For all precedence inputs
        if( n->in(k) ) {
          if( !any_prec++ ) tty->print(" |");
          dump( n->in(k) );
        }

      // Dump node-specific info
      n->dump_spec(tty);
      tty->print("\n");
    }
    tty->print("\n");
  }
}
#endif

//------------------------------combine_these_two------------------------------
// Combine the live ranges def'd by these 2 Nodes.  N2 is an input to N1.
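// (This is a no-op when the two Nodes are already in the same live range or
// when their live ranges interfere; otherwise the smaller-numbered live
// range absorbs the larger.)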
void PhaseCoalesce::combine_these_two(Node *n1, Node *n2) {
  uint lr1 = _phc._lrg_map.find(n1);
  uint lr2 = _phc._lrg_map.find(n2);
  if( lr1 != lr2 &&                            // Different live ranges already AND
      !_phc._ifg->test_edge_sq( lr1, lr2 ) ) { // Do not interfere
    LRG *lrg1 = &_phc.lrgs(lr1);
    LRG *lrg2 = &_phc.lrgs(lr2);
    // Not an oop->int cast; oop->oop, int->int, AND int->oop are OK.

    // Now, why is int->oop OK?  We end up declaring a raw-pointer as an oop
    // and in general that's a bad thing.  However, int->oop conversions only
    // happen at GC points, so the lifetime of the misclassified raw-pointer
    // is from the CheckCastPP (that converts it to an oop) backwards up
    // through a merge point and into the slow-path call, and around the
    // diamond up to the heap-top check and back down into the slow-path call.
    // The misclassified raw pointer is NOT live across the slow-path call,
    // and so does not appear in any GC info, so the fact that it is
    // misclassified is OK.

    if( (lrg1->_is_oop || !lrg2->_is_oop) && // not an oop->int cast AND
        // Compatible final mask
        lrg1->mask().overlap( lrg2->mask() ) ) {
      // Merge larger into smaller.
      if( lr1 > lr2 ) {
        uint tmp = lr1; lr1 = lr2; lr2 = tmp;
        Node *n = n1; n1 = n2; n2 = n;
        LRG *ltmp = lrg1; lrg1 = lrg2; lrg2 = ltmp;
      }
      // Union lr2 into lr1
      _phc.Union( n1, n2 );
      if (lrg1->_maxfreq < lrg2->_maxfreq)
        lrg1->_maxfreq = lrg2->_maxfreq;
      // Merge in the IFG
      _phc._ifg->Union( lr1, lr2 );
      // Combine register restrictions
      lrg1->AND(lrg2->mask());
    }
  }
}

//------------------------------coalesce_driver--------------------------------
// Copy coalescing
void PhaseCoalesce::coalesce_driver() {
  verify();
  // Coalesce from high frequency to low
  for( uint i=0; i<_phc._cfg._num_blocks; i++ )
    coalesce( _phc._blks[i] );
}

//------------------------------insert_copy_with_overlap-----------------------
// I am inserting copies to come out of SSA form.  In the general case, I am
// doing a parallel renaming.  I'm in the Named world now, so I can't do a
// general parallel renaming.  All the copies now use "names" (live-ranges)
// to carry values instead of the explicit use-def chains.  Suppose I need to
// insert 2 copies into the same block.  They copy L161->L128 and L128->L132.
// If I insert them in the wrong order then L128 will get clobbered before it
// can get used by the second copy.  This cannot happen in the SSA model;
// direct use-def chains get me the right value.  It DOES happen in the named
// model so I have to handle the reordering of copies.
//
// In general, I need to topo-sort the placed copies to avoid conflicts.
// It's possible to have a closed cycle of copies (e.g., recirculating the
// same values around a loop).  In this case I need a temp to break the cycle.
void PhaseAggressiveCoalesce::insert_copy_with_overlap( Block *b, Node *copy, uint dst_name, uint src_name ) {

  // Scan backwards for the locations of the last use of the dst_name.
  // I am about to clobber the dst_name, so the copy must be inserted
  // after the last use.  Last use is really first-use on a backwards scan.
  uint i = b->end_idx()-1;
  while(1) {
    Node *n = b->_nodes[i];
    // Check for end of virtual copies; this is also the end of the
    // parallel renaming effort.
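    // Nodes with _idx >= _unique were inserted by this coalescing pass,
    // so the scan can stop at the first original (pre-coalesce) Node.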
    if (n->_idx < _unique) {
      break;
    }
    uint idx = n->is_Copy();
    assert( idx || n->is_Con() || n->is_MachProj(), "Only copies during parallel renaming" );
    if (idx && _phc._lrg_map.find(n->in(idx)) == dst_name) {
      break;
    }
    i--;
  }
  uint last_use_idx = i;

  // Also search for any kill of src_name that exits the block.
  // Since the copy uses src_name, I have to come before any kill.
  uint kill_src_idx = b->end_idx();
  // There can be only 1 kill that exits any block and that is
  // the last kill.  Thus it is the first kill on a backwards scan.
  i = b->end_idx()-1;
  while (1) {
    Node *n = b->_nodes[i];
    // Check for end of virtual copies; this is also the end of the
    // parallel renaming effort.
    if (n->_idx < _unique) {
      break;
    }
    assert( n->is_Copy() || n->is_Con() || n->is_MachProj(), "Only copies during parallel renaming" );
    if (_phc._lrg_map.find(n) == src_name) {
      kill_src_idx = i;
      break;
    }
    i--;
  }
  // Need a temp?  Last use of dst comes after the kill of src?
  if (last_use_idx >= kill_src_idx) {
    // Need to break a cycle with a temp
    uint idx = copy->is_Copy();
    Node *tmp = copy->clone();
    uint max_lrg_id = _phc._lrg_map.max_lrg_id();
    _phc.new_lrg(tmp, max_lrg_id);
    _phc._lrg_map.set_max_lrg_id(max_lrg_id + 1);

    // Insert new temp between copy and source
    tmp->set_req(idx, copy->in(idx));
    copy->set_req(idx, tmp);
    // Save source in temp early, before source is killed
    b->_nodes.insert(kill_src_idx, tmp);
    _phc._cfg._bbs.map( tmp->_idx, b );
    last_use_idx++;
  }

  // Insert just after last use
  b->_nodes.insert(last_use_idx+1, copy);
}

//------------------------------insert_copies----------------------------------
void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
  // We compress the LRG map and fix up liveout data only here, since the
  // other place, in Split(), is guarded by an assert which we never hit.
  _phc._lrg_map.compress_uf_map_for_nodes();
  // Fix block's liveout data for compressed live ranges.
  for (uint lrg = 1; lrg < _phc._lrg_map.max_lrg_id(); lrg++) {
    uint compressed_lrg = _phc._lrg_map.find(lrg);
    if (lrg != compressed_lrg) {
      for (uint bidx = 0; bidx < _phc._cfg._num_blocks; bidx++) {
        IndexSet *liveout = _phc._live->live(_phc._cfg._blocks[bidx]);
        if (liveout->member(lrg)) {
          liveout->remove(lrg);
          liveout->insert(compressed_lrg);
        }
      }
    }
  }

  // All new nodes added are actual copies to replace virtual copies.
  // Nodes with index less than '_unique' are original, non-virtual Nodes.
  _unique = C->unique();

  for( uint i=0; i<_phc._cfg._num_blocks; i++ ) {
    C->check_node_count(NodeLimitFudgeFactor, "out of nodes in coalesce");
    if (C->failing()) return;
    Block *b = _phc._cfg._blocks[i];
    uint cnt = b->num_preds();  // Number of inputs to the Phi

    for( uint l = 1; l<b->_nodes.size(); l++ ) {
      Node *n = b->_nodes[l];

      // Do not use removed-copies, use copied value instead
      uint ncnt = n->req();
      for( uint k = 1; k<ncnt; k++ ) {
        Node *copy = n->in(k);
        uint cidx = copy->is_Copy();
        if( cidx ) {
          Node *def = copy->in(cidx);
          if (_phc._lrg_map.find(copy) == _phc._lrg_map.find(def)) {
            n->set_req(k, def);
          }
        }
      }

      // Remove any explicit copies that get coalesced.
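      // (A copy whose def and use ended up in the same live range is now
      // redundant: splice uses around it and drop it from the block.)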
      uint cidx = n->is_Copy();
      if( cidx ) {
        Node *def = n->in(cidx);
        if (_phc._lrg_map.find(n) == _phc._lrg_map.find(def)) {
          n->replace_by(def);
          n->set_req(cidx, NULL);
          b->_nodes.remove(l);
          l--;
          continue;
        }
      }

      if (n->is_Phi()) {
        // Get the chosen name for the Phi
        uint phi_name = _phc._lrg_map.find(n);
        // Ignore the pre-allocated specials
        if (!phi_name) {
          continue;
        }
        // Check for mismatch inputs to Phi
        for (uint j = 1; j < cnt; j++) {
          Node *m = n->in(j);
          uint src_name = _phc._lrg_map.find(m);
          if (src_name != phi_name) {
            Block *pred = _phc._cfg._bbs[b->pred(j)->_idx];
            Node *copy;
            assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
            // Rematerialize constants instead of copying them
            if( m->is_Mach() && m->as_Mach()->is_Con() &&
                m->as_Mach()->rematerialize() ) {
              copy = m->clone();
              // Insert the copy in the predecessor basic block
              pred->add_inst(copy);
              // Copy any flags as well
              _phc.clone_projs(pred, pred->end_idx(), m, copy, _phc._lrg_map);
            } else {
              const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()];
              copy = new (C) MachSpillCopyNode(m, *rm, *rm);
              // Find a good place to insert.  Kinda tricky, use a subroutine
              insert_copy_with_overlap(pred, copy, phi_name, src_name);
            }
            // Insert the copy in the use-def chain
            n->set_req(j, copy);
            _phc._cfg._bbs.map( copy->_idx, pred );
            // Extend ("register allocate") the names array for the copy.
            _phc._lrg_map.extend(copy->_idx, phi_name);
          } // End of if Phi names do not match
        } // End of for all inputs to Phi
      } else { // End of if Phi

        // Now check for 2-address instructions
        uint idx;
        if( n->is_Mach() && (idx=n->as_Mach()->two_adr()) ) {
          // Get the chosen name for the Node
          uint name = _phc._lrg_map.find(n);
          assert(name, "no 2-address specials");
          // Check for name mis-match on the 2-address input
          Node *m = n->in(idx);
          if (_phc._lrg_map.find(m) != name) {
            Node *copy;
            assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
            // At this point it is unsafe to extend live ranges (6550579).
            // Rematerialize only constants as we do for Phi above.
            if(m->is_Mach() && m->as_Mach()->is_Con() &&
               m->as_Mach()->rematerialize()) {
              copy = m->clone();
              // Insert the copy in the basic block, just before us
              b->_nodes.insert(l++, copy);
              l += _phc.clone_projs(b, l, m, copy, _phc._lrg_map);
            } else {
              const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()];
              copy = new (C) MachSpillCopyNode(m, *rm, *rm);
              // Insert the copy in the basic block, just before us
              b->_nodes.insert(l++, copy);
            }
            // Insert the copy in the use-def chain
            n->set_req(idx, copy);
            // Extend ("register allocate") the names array for the copy.
            _phc._lrg_map.extend(copy->_idx, name);
            _phc._cfg._bbs.map( copy->_idx, b );
          }
        } // End of is two-adr

        // Insert a copy at a debug use for a lrg which has high frequency
        if (b->_freq < OPTO_DEBUG_SPLIT_FREQ || b->is_uncommon(_phc._cfg._bbs)) {
          // Walk the debug inputs to the node and check for lrg freq
          JVMState* jvms = n->jvms();
          uint debug_start = jvms ? jvms->debug_start() : 999999;
          uint debug_end   = jvms ? jvms->debug_end()   : 999999;
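          // (999999 is an out-of-range sentinel: with no JVMState the loop
          // below runs zero iterations.)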
          for (uint inpidx = debug_start; inpidx < debug_end; inpidx++) {
            // Do not split monitors; they are only needed for debug table
            // entries and need no code.
            if (jvms->is_monitor_use(inpidx)) {
              continue;
            }
            Node *inp = n->in(inpidx);
            uint nidx = _phc._lrg_map.live_range_id(inp);
            LRG &lrg = lrgs(nidx);

            // If this lrg has a high frequency use/def
            if( lrg._maxfreq >= _phc.high_frequency_lrg() ) {
              // If the live range is also live out of this block (like it
              // would be for a fast/slow idiom), the normal spill mechanism
              // does an excellent job.  If it is not live out of this block
              // (like it would be for debug info to uncommon trap) splitting
              // the live range now allows a better allocation in the high
              // frequency blocks.
              // Build_IFG_virtual has converted the live sets to
              // live-IN info, not live-OUT info.
              uint k;
              for( k=0; k < b->_num_succs; k++ )
                if( _phc._live->live(b->_succs[k])->member( nidx ) )
                  break;                // Live in to some successor block?
              if( k < b->_num_succs )
                continue;               // Live out; do not pre-split
              // Split the lrg at this use
              const RegMask *rm = C->matcher()->idealreg2spillmask[inp->ideal_reg()];
              Node *copy = new (C) MachSpillCopyNode( inp, *rm, *rm );
              // Insert the copy in the use-def chain
              n->set_req(inpidx, copy );
              // Insert the copy in the basic block, just before us
              b->_nodes.insert( l++, copy );
              // Extend ("register allocate") the names array for the copy.
              uint max_lrg_id = _phc._lrg_map.max_lrg_id();
              _phc.new_lrg(copy, max_lrg_id);
              _phc._lrg_map.set_max_lrg_id(max_lrg_id + 1);
              _phc._cfg._bbs.map(copy->_idx, b);
              //tty->print_cr("Split a debug use in Aggressive Coalesce");
            } // End of if high frequency use/def
          } // End of for all debug inputs
        } // End of if low frequency safepoint

      } // End of if Phi

    } // End of for all instructions
  } // End of for all blocks
}

//=============================================================================
//------------------------------coalesce---------------------------------------
// Aggressive (but pessimistic) copy coalescing of a single block

// The following coalesce pass represents a single round of aggressive
// pessimistic coalesce.  "Aggressive" means no attempt to preserve
// colorability when coalescing.  This occasionally means more spills, but
// it also means fewer rounds of coalescing for better code - and that means
// faster compiles.

// "Pessimistic" means we do not hit the fixed point in one pass (and we are
// reaching for the least fixed point to boot).  This is typically solved
// with a few more rounds of coalescing, but the compiler must run fast.  We
// could optimistically coalesce everything touching PhiNodes together
// into one big live range, then check for self-interference.  Everywhere
// the live range interferes with self it would have to be split.  Finding
// the right split points can be done with some heuristics (based on
// expected frequency of edges in the live range).  In short, it's a real
// research problem and the timeline is too short to allow such research.
// Further thoughts: (1) build the LR in a pass, (2) find self-interference
// in another pass, (3) per each self-conflict, split, (4) split by finding
// the low-cost cut (min-cut) of the LR, (5) edges in the LR are weighted
// according to the GCM algorithm (or just exec freq on CFG edges).

void PhaseAggressiveCoalesce::coalesce( Block *b ) {
  // Copies are still "virtual" - meaning we have not made them explicitly
  // copies.  Instead, Phi functions of successor blocks have mis-matched
  // live-ranges.  If I fail to coalesce, I'll have to insert a copy to line
  // up the live-ranges.  Check for Phis in successor blocks.
  uint i;
  for( i=0; i<b->_num_succs; i++ ) {
    Block *bs = b->_succs[i];
    // Find index of 'b' in 'bs' predecessors
    uint j=1;
    while( _phc._cfg._bbs[bs->pred(j)->_idx] != b ) j++;
    // Visit all the Phis in successor block
    for( uint k = 1; k<bs->_nodes.size(); k++ ) {
      Node *n = bs->_nodes[k];
      if( !n->is_Phi() ) break;
      combine_these_two( n, n->in(j) );
    }
  } // End of for all successor blocks

  // Check _this_ block for 2-address instructions and copies.
  uint cnt = b->end_idx();
  for( i = 1; i<cnt; i++ ) {
    Node *n = b->_nodes[i];
    uint idx;
    // 2-address instructions have a virtual Copy matching their input
    // to their output
    if (n->is_Mach() && (idx = n->as_Mach()->two_adr())) {
      MachNode *mach = n->as_Mach();
      combine_these_two(mach, mach->in(idx));
    }
  } // End of for all instructions in block
}

//=============================================================================
//------------------------------PhaseConservativeCoalesce----------------------
PhaseConservativeCoalesce::PhaseConservativeCoalesce(PhaseChaitin &chaitin) : PhaseCoalesce(chaitin) {
  _ulr.initialize(_phc._lrg_map.max_lrg_id());
}

//------------------------------verify-----------------------------------------
void PhaseConservativeCoalesce::verify() {
#ifdef ASSERT
  _phc.set_was_low();
#endif
}

//------------------------------union_helper-----------------------------------
void PhaseConservativeCoalesce::union_helper( Node *lr1_node, Node *lr2_node, uint lr1, uint lr2, Node *src_def, Node *dst_copy, Node *src_copy, Block *b, uint bindex ) {
  // Join live ranges.  Merge larger into smaller.  Union lr2 into lr1 in the
  // union-find tree
  _phc.Union( lr1_node, lr2_node );

  // Single-def live range ONLY if both live ranges are single-def.
  // If both are single def, then src_def powers one live range
  // and dst_copy powers the other.  After merging, src_def powers
  // the combined live range.
  lrgs(lr1)._def = (lrgs(lr1).is_multidef() ||
                    lrgs(lr2).is_multidef() )
    ? NodeSentinel : src_def;
  lrgs(lr2)._def = NULL;        // No def for lrg 2
  lrgs(lr2).Clear();            // Force empty mask for LRG 2
  //lrgs(lr2)._size = 0;        // Live-range 2 goes dead
  lrgs(lr1)._is_oop |= lrgs(lr2)._is_oop;
  lrgs(lr2)._is_oop = 0;        // In particular, not an oop for GC info

  if (lrgs(lr1)._maxfreq < lrgs(lr2)._maxfreq)
    lrgs(lr1)._maxfreq = lrgs(lr2)._maxfreq;

  // Copy original value instead.  Intermediate copies go dead, and
  // the dst_copy becomes useless.
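  // e.g., for a chain  src_def -> src_copy -> ... -> dst_copy,  all uses of
  // dst_copy are rewired below to read src_def directly.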
  int didx = dst_copy->is_Copy();
  dst_copy->set_req( didx, src_def );
  // Add copy to free list
  // _phc.free_spillcopy(b->_nodes[bindex]);
  assert( b->_nodes[bindex] == dst_copy, "" );
  dst_copy->replace_by( dst_copy->in(didx) );
  dst_copy->set_req( didx, NULL);
  b->_nodes.remove(bindex);
  if( bindex < b->_ihrp_index ) b->_ihrp_index--;
  if( bindex < b->_fhrp_index ) b->_fhrp_index--;

  // Stretched lr1; add it to liveness of intermediate blocks
  Block *b2 = _phc._cfg._bbs[src_copy->_idx];
  while( b != b2 ) {
    b = _phc._cfg._bbs[b->pred(1)->_idx];
    _phc._live->live(b)->insert(lr1);
  }
}

//------------------------------compute_separating_interferences---------------
// Factored code from copy_copy that computes extra interferences from
// lengthening a live range by double-coalescing.
uint PhaseConservativeCoalesce::compute_separating_interferences(Node *dst_copy, Node *src_copy, Block *b, uint bindex, RegMask &rm, uint reg_degree, uint rm_size, uint lr1, uint lr2 ) {

  assert(!lrgs(lr1)._fat_proj, "cannot coalesce fat_proj");
  assert(!lrgs(lr2)._fat_proj, "cannot coalesce fat_proj");
  Node *prev_copy = dst_copy->in(dst_copy->is_Copy());
  Block *b2 = b;
  uint bindex2 = bindex;
  while( 1 ) {
    // Find previous instruction
    bindex2--;                  // Chain backwards 1 instruction
    while( bindex2 == 0 ) {     // At block start, find prior block
      assert( b2->num_preds() == 2, "cannot double coalesce across c-flow" );
      b2 = _phc._cfg._bbs[b2->pred(1)->_idx];
      bindex2 = b2->end_idx()-1;
    }
    // Get prior instruction
    assert(bindex2 < b2->_nodes.size(), "index out of bounds");
    Node *x = b2->_nodes[bindex2];
    if( x == prev_copy ) {      // Previous copy in copy chain?
      if( prev_copy == src_copy)// Found end of chain and all interferences
        break;                  // So break out of loop
      // Else work back one in copy chain
      prev_copy = prev_copy->in(prev_copy->is_Copy());
    } else {                    // Else collect interferences
      uint lidx = _phc._lrg_map.find(x);
      // Found another def of live-range being stretched?
      if(lidx == lr1) {
        return max_juint;
      }
      if(lidx == lr2) {
        return max_juint;
      }

      // If we attempt to coalesce across a bound def
      if( lrgs(lidx).is_bound() ) {
        // Do not let the coalesced LRG expect to get the bound color
        rm.SUBTRACT( lrgs(lidx).mask() );
        // Recompute rm_size
        rm_size = rm.Size();
        //if( rm._flags ) rm_size += 1000000;
        if( reg_degree >= rm_size ) return max_juint;
      }
      if( rm.overlap(lrgs(lidx).mask()) ) {
        // Insert lidx into union LRG; returns TRUE if actually inserted
        if( _ulr.insert(lidx) ) {
          // Infinite-stack neighbors do not alter colorability, as they
          // can always color to some other color.
          if( !lrgs(lidx).mask().is_AllStack() ) {
            // If this coalesce will make any new neighbor uncolorable,
            // do not coalesce.
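            // ("just_lo_degree" means the neighbor sits exactly at the
            // colorability boundary; one more edge makes it hi-degree.)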
            if( lrgs(lidx).just_lo_degree() )
              return max_juint;
            // Bump our degree
            if( ++reg_degree >= rm_size )
              return max_juint;
          } // End of if not infinite-stack neighbor
        } // End of if actually inserted
      } // End of if live range overlaps
    } // End of else collect interferences for 1 node
  } // End of while forever, scan back for interferences
  return reg_degree;
}

//------------------------------update_ifg-------------------------------------
void PhaseConservativeCoalesce::update_ifg(uint lr1, uint lr2, IndexSet *n_lr1, IndexSet *n_lr2) {
  // Some original neighbors of lr1 might have gone away
  // because the constrained register mask prevented them.
  // Remove lr1 from such neighbors.
  IndexSetIterator one(n_lr1);
  uint neighbor;
  LRG &lrg1 = lrgs(lr1);
  while ((neighbor = one.next()) != 0)
    if( !_ulr.member(neighbor) )
      if( _phc._ifg->neighbors(neighbor)->remove(lr1) )
        lrgs(neighbor).inc_degree( -lrg1.compute_degree(lrgs(neighbor)) );

  // lr2 is now called (coalesced into) lr1.
  // Remove lr2 from the IFG.
  IndexSetIterator two(n_lr2);
  LRG &lrg2 = lrgs(lr2);
  while ((neighbor = two.next()) != 0)
    if( _phc._ifg->neighbors(neighbor)->remove(lr2) )
      lrgs(neighbor).inc_degree( -lrg2.compute_degree(lrgs(neighbor)) );

  // Some neighbors of intermediate copies now interfere with the
  // combined live range.
  IndexSetIterator three(&_ulr);
  while ((neighbor = three.next()) != 0)
    if( _phc._ifg->neighbors(neighbor)->insert(lr1) )
      lrgs(neighbor).inc_degree( lrg1.compute_degree(lrgs(neighbor)) );
}

//------------------------------record_bias------------------------------------
static void record_bias( const PhaseIFG *ifg, int lr1, int lr2 ) {
  // Tag copy bias here
  if( !ifg->lrgs(lr1)._copy_bias )
    ifg->lrgs(lr1)._copy_bias = lr2;
  if( !ifg->lrgs(lr2)._copy_bias )
    ifg->lrgs(lr2)._copy_bias = lr1;
}

//------------------------------copy_copy--------------------------------------
// See if I can coalesce a series of multiple copies together.  I need the
// final dest copy and the original src copy.  They can be the same Node.
// Compute the compatible register masks.
bool PhaseConservativeCoalesce::copy_copy(Node *dst_copy, Node *src_copy, Block *b, uint bindex) {

  if (!dst_copy->is_SpillCopy()) {
    return false;
  }
  if (!src_copy->is_SpillCopy()) {
    return false;
  }
  Node *src_def = src_copy->in(src_copy->is_Copy());
  uint lr1 = _phc._lrg_map.find(dst_copy);
  uint lr2 = _phc._lrg_map.find(src_def);

  // Same live ranges already?
  if (lr1 == lr2) {
    return false;
  }

  // Interfere?
  if (_phc._ifg->test_edge_sq(lr1, lr2)) {
    return false;
  }

  // Not an oop->int cast; oop->oop, int->int, AND int->oop are OK.
  if (!lrgs(lr1)._is_oop && lrgs(lr2)._is_oop) { // not an oop->int cast
    return false;
  }

  // Coalescing between an aligned live range and a mis-aligned live range?
  // No, no!  Alignment changes how we count degree.
  if (lrgs(lr1)._fat_proj != lrgs(lr2)._fat_proj) {
    return false;
  }

  // Sort; use smaller live-range number
  Node *lr1_node = dst_copy;
  Node *lr2_node = src_def;
  if (lr1 > lr2) {
    uint tmp = lr1; lr1 = lr2; lr2 = tmp;
    lr1_node = src_def; lr2_node = dst_copy;
  }

  // Check for compatibility of the 2 live ranges by
  // intersecting their allowed register sets.
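  // e.g., if lr1 could live in {RAX,RBX} and lr2 in {RBX,RCX} (illustrative
  // registers), the intersection leaves only {RBX}, giving rm_size == 1.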
  RegMask rm = lrgs(lr1).mask();
  rm.AND(lrgs(lr2).mask());
  // Number of bits free
  uint rm_size = rm.Size();

  if (UseFPUForSpilling && rm.is_AllStack() ) {
    // Don't coalesce when frequency difference is large
    Block *dst_b = _phc._cfg._bbs[dst_copy->_idx];
    Block *src_def_b = _phc._cfg._bbs[src_def->_idx];
    if (src_def_b->_freq > 10*dst_b->_freq )
      return false;
  }

  // If we can use any stack slot, then effective size is infinite
  if( rm.is_AllStack() ) rm_size += 1000000;
  // Incompatible masks, no way to coalesce
  if( rm_size == 0 ) return false;

  // Another early bail-out test is when we are double-coalescing and the
  // 2 copies are separated by some control flow.
  if( dst_copy != src_copy ) {
    Block *src_b = _phc._cfg._bbs[src_copy->_idx];
    Block *b2 = b;
    while( b2 != src_b ) {
      if( b2->num_preds() > 2 ) { // Found merge-point
        _phc._lost_opp_cflow_coalesce++;
        // extra record_bias commented out because Chris believes it is not
        // productive.  Since we can record only 1 bias, we want to choose one
        // that stands a chance of working and this one probably does not.
        //record_bias( _phc._lrgs, lr1, lr2 );
        return false;           // Too hard to find all interferences
      }
      b2 = _phc._cfg._bbs[b2->pred(1)->_idx];
    }
  }

  // Union the two interference sets together into '_ulr'
  uint reg_degree = _ulr.lrg_union( lr1, lr2, rm_size, _phc._ifg, rm );

  if( reg_degree >= rm_size ) {
    record_bias( _phc._ifg, lr1, lr2 );
    return false;
  }

  // Now I need to compute all the interferences between dst_copy and
  // src_copy.  I'm not willing to visit the entire interference graph, so
  // I limit my search to things in dst_copy's block or in a straight
  // line of previous blocks.  I give up at merge points or when I get
  // more interferences than my degree.  I can stop when I find src_copy.
  if( dst_copy != src_copy ) {
    reg_degree = compute_separating_interferences(dst_copy, src_copy, b, bindex, rm, reg_degree, rm_size, lr1, lr2 );
    if( reg_degree == max_juint ) {
      record_bias( _phc._ifg, lr1, lr2 );
      return false;
    }
  } // End of if dst_copy & src_copy are different

  // ---- THE COMBINED LRG IS COLORABLE ----

  // YEAH - Now coalesce this copy away
  assert( lrgs(lr1).num_regs() == lrgs(lr2).num_regs(), "" );

  IndexSet *n_lr1 = _phc._ifg->neighbors(lr1);
  IndexSet *n_lr2 = _phc._ifg->neighbors(lr2);

  // Update the interference graph
  update_ifg(lr1, lr2, n_lr1, n_lr2);

  _ulr.remove(lr1);

  // Uncomment the following code to trace Coalescing in great detail.
  //
  //if (false) {
  //  tty->cr();
  //  tty->print_cr("#######################################");
  //  tty->print_cr("union %d and %d", lr1, lr2);
  //  n_lr1->dump();
  //  n_lr2->dump();
  //  tty->print_cr("resulting set is");
  //  _ulr.dump();
  //}

  // Replace n_lr1 with the new combined live range.  _ulr will use
  // n_lr1's old memory on the next iteration.  n_lr2 is cleared to
  // send its internal memory to the free list.
  _ulr.swap(n_lr1);
  _ulr.clear();
  n_lr2->clear();

  lrgs(lr1).set_degree( _phc._ifg->effective_degree(lr1) );
  lrgs(lr2).set_degree( 0 );

  // Join live ranges.  Merge larger into smaller.  Union lr2 into lr1 in the
  // union-find tree.
  union_helper( lr1_node, lr2_node, lr1, lr2, src_def, dst_copy, src_copy, b, bindex );
  // Combine register restrictions
  lrgs(lr1).set_mask(rm);
  lrgs(lr1).compute_set_mask_size();
  lrgs(lr1)._cost += lrgs(lr2)._cost;
  lrgs(lr1)._area += lrgs(lr2)._area;

  // While it's uncommon to successfully coalesce live ranges that started out
  // being not-lo-degree, it can happen.  In any case the combined coalesced
  // live range had better Simplify nicely.
  lrgs(lr1)._was_lo = 1;

  // kinda expensive to do all the time
  //tty->print_cr("warning: slow verify happening");
  //_phc._ifg->verify( &_phc );
  return true;
}

//------------------------------coalesce---------------------------------------
// Conservative (but pessimistic) copy coalescing of a single block
void PhaseConservativeCoalesce::coalesce( Block *b ) {
  // Bail out on infrequent blocks
  if( b->is_uncommon(_phc._cfg._bbs) )
    return;
  // Check this block for copies.
  for( uint i = 1; i<b->end_idx(); i++ ) {
    // Check for actual copies on inputs.  Coalesce a copy into its
    // input if use and copy's input are compatible.
    Node *copy1 = b->_nodes[i];
    uint idx1 = copy1->is_Copy();
    if( !idx1 ) continue;       // Not a copy

    if( copy_copy(copy1, copy1, b, i) ) {
      i--;                      // Retry, same location in block
      PhaseChaitin::_conserv_coalesce++; // Collect stats on success
      continue;
    }
  }
}