/*
 * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/oopMap.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/block.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/chaitin.hpp"
#include "opto/coalesce.hpp"
#include "opto/indexSet.hpp"
#include "opto/machnode.hpp"
#include "opto/memnode.hpp"
#include "opto/opcodes.hpp"

PhaseIFG::PhaseIFG( Arena *arena ) : Phase(Interference_Graph), _arena(arena) {
}

void PhaseIFG::init( uint maxlrg ) {
  _maxlrg = maxlrg;
  _yanked = new (_arena) VectorSet(_arena);
  _is_square = false;
  // Make uninitialized adjacency lists
  _adjs = (IndexSet*)_arena->Amalloc(sizeof(IndexSet)*maxlrg);
  // Also make empty live range structures
  _lrgs = (LRG *)_arena->Amalloc( maxlrg * sizeof(LRG) );
  memset(_lrgs,0,sizeof(LRG)*maxlrg);
  // Init all to empty
  for( uint i = 0; i < maxlrg; i++ ) {
    _adjs[i].initialize(maxlrg);
    _lrgs[i].Set_All();
  }
}

// Add edge between vertices a & b.  These are sorted (triangular matrix),
// then the smaller number is inserted in the larger numbered array.
int PhaseIFG::add_edge( uint a, uint b ) {
  lrgs(a).invalid_degree();
  lrgs(b).invalid_degree();
  // Sort a and b, so that a is bigger
  assert( !_is_square, "only on triangular" );
  if( a < b ) { uint tmp = a; a = b; b = tmp; }
  return _adjs[a].insert( b );
}

// Add an edge between 'a' and everything in the vector.
void PhaseIFG::add_vector( uint a, IndexSet *vec ) {
  // IFG is triangular, so do the inserts where 'a' < 'b'.
  assert( !_is_square, "only on triangular" );
  IndexSet *adjs_a = &_adjs[a];
  if( !vec->count() ) return;

  IndexSetIterator elements(vec);
  uint neighbor;
  while ((neighbor = elements.next()) != 0) {
    add_edge( a, neighbor );
  }
}
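
// Worked example of the triangular encoding (illustrative indices only):
// add_edge(3, 7) and add_edge(7, 3) both record index 3 in _adjs[7], since the
// call sorts its arguments so the larger live range index owns the entry.
// test_edge() below sorts the same way, so either query order finds the single
// stored bit, and SquareUp() later mirrors every entry so that both _adjs[3]
// and _adjs[7] contain the edge.
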
// Is there an edge between a and b?
int PhaseIFG::test_edge( uint a, uint b ) const {
  // Sort a and b, so that a is larger
  assert( !_is_square, "only on triangular" );
  if( a < b ) { uint tmp = a; a = b; b = tmp; }
  return _adjs[a].member(b);
}

// Convert triangular matrix to square matrix
void PhaseIFG::SquareUp() {
  assert( !_is_square, "only on triangular" );

  // Simple transpose
  for( uint i = 0; i < _maxlrg; i++ ) {
    IndexSetIterator elements(&_adjs[i]);
    uint datum;
    while ((datum = elements.next()) != 0) {
      _adjs[datum].insert( i );
    }
  }
  _is_square = true;
}

// Compute effective degree in bulk
void PhaseIFG::Compute_Effective_Degree() {
  assert( _is_square, "only on square" );

  for( uint i = 0; i < _maxlrg; i++ )
    lrgs(i).set_degree(effective_degree(i));
}

int PhaseIFG::test_edge_sq( uint a, uint b ) const {
  assert( _is_square, "only on square" );
  // Swap, so that 'a' has the lesser count.  Then binary search is on
  // the smaller of a's list and b's list.
  if( neighbor_cnt(a) > neighbor_cnt(b) ) { uint tmp = a; a = b; b = tmp; }
  //return _adjs[a].unordered_member(b);
  return _adjs[a].member(b);
}

// Union edges of B into A
void PhaseIFG::Union( uint a, uint b ) {
  assert( _is_square, "only on square" );
  IndexSet *A = &_adjs[a];
  IndexSetIterator b_elements(&_adjs[b]);
  uint datum;
  while ((datum = b_elements.next()) != 0) {
    if(A->insert(datum)) {
      _adjs[datum].insert(a);
      lrgs(a).invalid_degree();
      lrgs(datum).invalid_degree();
    }
  }
}

// Yank a Node and all connected edges from the IFG.  Return a
// list of neighbors (edges) yanked.
IndexSet *PhaseIFG::remove_node( uint a ) {
  assert( _is_square, "only on square" );
  assert( !_yanked->test(a), "" );
  _yanked->set(a);

  // I remove the LRG from all neighbors.
  IndexSetIterator elements(&_adjs[a]);
  LRG &lrg_a = lrgs(a);
  uint datum;
  while ((datum = elements.next()) != 0) {
    _adjs[datum].remove(a);
    lrgs(datum).inc_degree( -lrg_a.compute_degree(lrgs(datum)) );
  }
  return neighbors(a);
}

// Re-insert a yanked Node.
void PhaseIFG::re_insert( uint a ) {
  assert( _is_square, "only on square" );
  assert( _yanked->test(a), "" );
  (*_yanked) >>= a;

  IndexSetIterator elements(&_adjs[a]);
  uint datum;
  while ((datum = elements.next()) != 0) {
    _adjs[datum].insert(a);
    lrgs(datum).invalid_degree();
  }
}

// Compute the degree between 2 live ranges.  If both live ranges are
// aligned-adjacent powers-of-2 then we use the MAX size.  If either is
// mis-aligned (or for Fat-Projections, not-adjacent) then we have to
// MULTIPLY the sizes.  Inspect Briggs' thesis on register pairs to see why
// this is so.
int LRG::compute_degree( LRG &l ) const {
  int tmp;
  int num_regs = _num_regs;
  int nregs = l.num_regs();
  tmp = (_fat_proj || l._fat_proj)  // either is a fat-proj?
    ? (num_regs * nregs)            // then use product
    : MAX2(num_regs,nregs);         // else use max
  return tmp;
}
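
// Rough worked example of the degree computation above (illustrative sizes
// only): two interfering live ranges that each need 2 aligned-adjacent
// registers contribute MAX2(2, 2) = 2 to each other's degree, since aligned
// pairs can only displace each other pair-for-pair.  If one of them is instead
// a fat-proj that kills 8 registers, the contribution becomes 8 * 2 = 16, the
// pessimistic product, because the killed registers need not be adjacent or
// aligned with the neighbor's set.
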
// Compute effective degree for this live range.  If both live ranges are
// aligned-adjacent powers-of-2 then we use the MAX size.  If either is
// mis-aligned (or for Fat-Projections, not-adjacent) then we have to
// MULTIPLY the sizes.  Inspect Briggs' thesis on register pairs to see why
// this is so.
int PhaseIFG::effective_degree( uint lidx ) const {
  int eff = 0;
  int num_regs = lrgs(lidx).num_regs();
  int fat_proj = lrgs(lidx)._fat_proj;
  IndexSet *s = neighbors(lidx);
  IndexSetIterator elements(s);
  uint nidx;
  while((nidx = elements.next()) != 0) {
    LRG &lrgn = lrgs(nidx);
    int nregs = lrgn.num_regs();
    eff += (fat_proj || lrgn._fat_proj) // either is a fat-proj?
      ? (num_regs * nregs)              // then use product
      : MAX2(num_regs,nregs);           // else use max
  }
  return eff;
}


#ifndef PRODUCT
void PhaseIFG::dump() const {
  tty->print_cr("-- Interference Graph --%s--",
                _is_square ? "square" : "triangular" );
  if( _is_square ) {
    for( uint i = 0; i < _maxlrg; i++ ) {
      tty->print( (*_yanked)[i] ? "XX " : "   ");
      tty->print("L%d: { ",i);
      IndexSetIterator elements(&_adjs[i]);
      uint datum;
      while ((datum = elements.next()) != 0) {
        tty->print("L%d ", datum);
      }
      tty->print_cr("}");

    }
    return;
  }

  // Triangular
  for( uint i = 0; i < _maxlrg; i++ ) {
    uint j;
    tty->print( (*_yanked)[i] ? "XX " : "   ");
    tty->print("L%d: { ",i);
    for( j = _maxlrg; j > i; j-- )
      if( test_edge(j - 1,i) ) {
        tty->print("L%d ",j - 1);
      }
    tty->print("| ");
    IndexSetIterator elements(&_adjs[i]);
    uint datum;
    while ((datum = elements.next()) != 0) {
      tty->print("L%d ", datum);
    }
    tty->print("}\n");
  }
  tty->print("\n");
}

void PhaseIFG::stats() const {
  ResourceMark rm;
  int *h_cnt = NEW_RESOURCE_ARRAY(int,_maxlrg*2);
  memset( h_cnt, 0, sizeof(int)*_maxlrg*2 );
  uint i;
  for( i = 0; i < _maxlrg; i++ ) {
    h_cnt[neighbor_cnt(i)]++;
  }
  tty->print_cr("--Histogram of counts--");
  for( i = 0; i < _maxlrg*2; i++ )
    if( h_cnt[i] )
      tty->print("%d/%d ",i,h_cnt[i]);
  tty->cr();
}

void PhaseIFG::verify( const PhaseChaitin *pc ) const {
  // IFG is square, sorted and no need for Find
  for( uint i = 0; i < _maxlrg; i++ ) {
    assert(!((*_yanked)[i]) || !neighbor_cnt(i), "Is removed completely" );
    IndexSet *set = &_adjs[i];
    IndexSetIterator elements(set);
    uint idx;
    uint last = 0;
    while ((idx = elements.next()) != 0) {
      assert(idx != i, "Must have empty diagonal");
      assert(pc->_lrg_map.find_const(idx) == idx, "Must not need Find");
      assert(_adjs[idx].member(i), "IFG not square");
      assert(!(*_yanked)[idx], "No yanked neighbors");
      assert(last < idx, "not sorted increasing");
      last = idx;
    }
    assert(!lrgs(i)._degree_valid || effective_degree(i) == lrgs(i).degree(), "degree is valid but wrong");
  }
}
#endif

/*
 * Interfere this register with everything currently live.
 * Check for interference by checking overlap of regmasks.
 * Only interfere if acceptable register masks overlap.
 */
void PhaseChaitin::interfere_with_live(uint lid, IndexSet* liveout) {
  LRG& lrg = lrgs(lid);
  const RegMask& rm = lrg.mask();
  IndexSetIterator elements(liveout);
  uint interfering_lid = elements.next();
  while (interfering_lid != 0) {
    LRG& interfering_lrg = lrgs(interfering_lid);
    if (rm.overlap(interfering_lrg.mask())) {
      _ifg->add_edge(lid, interfering_lid);
    }
    interfering_lid = elements.next();
  }
}
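
// Example of the mask-overlap filter above (illustrative register classes): a
// live range restricted to integer registers never overlaps one restricted to
// float/vector registers, so the two can be simultaneously live without an IFG
// edge; only live ranges whose acceptable register sets intersect interfere.
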
// Actually build the interference graph.  Uses virtual registers only, no
// physical register masks.  This allows me to be very aggressive when
// coalescing copies.  Some of this aggressiveness will have to be undone
// later, but I'd rather get all the copies I can now (since unremoved copies
// at this point can end up in bad places).  Copies that I re-insert later can
// be placed in low-frequency locations.
void PhaseChaitin::build_ifg_virtual( ) {
  Compile::TracePhase tp("buildIFG_virt", &timers[_t_buildIFGvirtual]);

  // For all blocks (in any order) do...
  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
    Block* block = _cfg.get_block(i);
    IndexSet* liveout = _live->live(block);

    // The IFG is built by a single reverse pass over each basic block.
    // Starting with the known live-out set, we remove things that get
    // defined and add things that become live (essentially executing one
    // pass of a standard LIVE analysis).  Just before a Node defines a value
    // (and removes it from the live-ness set) that value is certainly live.
    // The defined value interferes with everything currently live.  The
    // value is then removed from the live-ness set and its inputs are
    // added to the live-ness set.
    for (uint j = block->end_idx() + 1; j > 1; j--) {
      Node* n = block->get_node(j - 1);

      // Get value being defined
      uint r = _lrg_map.live_range_id(n);

      // Some special values do not allocate
      if (r) {

        // Remove from live-out set
        liveout->remove(r);

        // Copies do not define a new value and so do not interfere.
        // Remove the copy's source from the liveout set before interfering.
        uint idx = n->is_Copy();
        if (idx != 0) {
          liveout->remove(_lrg_map.live_range_id(n->in(idx)));
        }

        // Interfere with everything live
        interfere_with_live(r, liveout);
      }

      // Make all inputs live
      if (!n->is_Phi()) {  // Phi function uses come from prior block
        for(uint k = 1; k < n->req(); k++) {
          liveout->insert(_lrg_map.live_range_id(n->in(k)));
        }
      }

      // 2-address instructions always have the defined value live
      // on entry to the instruction, even though it is being defined
      // by the instruction.  We pretend a virtual copy sits just prior
      // to the instruction and kills the src-def'd register.
      // In other words, for 2-address instructions the defined value
      // interferes with all inputs.
      uint idx;
      if( n->is_Mach() && (idx = n->as_Mach()->two_adr()) ) {
        const MachNode *mach = n->as_Mach();
        // Sometimes my 2-address ADDs are commuted in a bad way.
        // We generally want the USE-DEF register to refer to the
        // loop-varying quantity, to avoid a copy.
        uint op = mach->ideal_Opcode();
        // Check that mach->num_opnds() == 3 to ensure the instruction is
        // not subsuming constants; this effectively excludes addI_cin_imm.
        // Can NOT swap for instructions like addI_cin_imm since it
        // is adding zero to yhi + carry and the second ideal-input
        // points to the result of adding low-halves.
        // Checking req() and num_opnds() does NOT distinguish addI_cout from addI_cout_imm.
        if( (op == Op_AddI && mach->req() == 3 && mach->num_opnds() == 3) &&
            n->in(1)->bottom_type()->base() == Type::Int &&
            // See if the ADD is involved in a tight data loop the wrong way
            n->in(2)->is_Phi() &&
            n->in(2)->in(2) == n ) {
          Node *tmp = n->in(1);
          n->set_req( 1, n->in(2) );
          n->set_req( 2, tmp );
        }
        // Defined value interferes with all inputs
        uint lidx = _lrg_map.live_range_id(n->in(idx));
        for (uint k = 1; k < n->req(); k++) {
          uint kidx = _lrg_map.live_range_id(n->in(k));
          if (kidx != lidx) {
            _ifg->add_edge(r, kidx);
          }
        }
      }
    } // End of forall instructions in block
  } // End of forall blocks
}

#ifdef ASSERT
uint PhaseChaitin::count_int_pressure(IndexSet* liveout) {
  IndexSetIterator elements(liveout);
  uint lidx = elements.next();
  uint cnt = 0;
  while (lidx != 0) {
    LRG& lrg = lrgs(lidx);
    if (lrg.mask_is_nonempty_and_up() &&
        !lrg.is_float_or_vector() &&
        lrg.mask().overlap(*Matcher::idealreg2regmask[Op_RegI])) {
      cnt += lrg.reg_pressure();
    }
    lidx = elements.next();
  }
  return cnt;
}

uint PhaseChaitin::count_float_pressure(IndexSet* liveout) {
  IndexSetIterator elements(liveout);
  uint lidx = elements.next();
  uint cnt = 0;
  while (lidx != 0) {
    LRG& lrg = lrgs(lidx);
    if (lrg.mask_is_nonempty_and_up() && lrg.is_float_or_vector()) {
      cnt += lrg.reg_pressure();
    }
    lidx = elements.next();
  }
  return cnt;
}
#endif

/*
 * Adjust register pressure down by 1. Capture the last hi-to-low transition.
 */
void PhaseChaitin::lower_pressure(Block* b, uint location, LRG& lrg, IndexSet* liveout, Pressure& int_pressure, Pressure& float_pressure) {
  if (lrg.mask_is_nonempty_and_up()) {
    if (lrg.is_float_or_vector()) {
      float_pressure.lower(lrg, location);
    } else {
      // Do not count the SP and flag registers
      const RegMask& r = lrg.mask();
      if (r.overlap(*Matcher::idealreg2regmask[Op_RegI])) {
        int_pressure.lower(lrg, location);
      }
    }
  }
  assert(int_pressure.current_pressure() == count_int_pressure(liveout), "the int pressure is incorrect");
  assert(float_pressure.current_pressure() == count_float_pressure(liveout), "the float pressure is incorrect");
}

/* Go to the first non-phi index in a block */
static uint first_nonphi_index(Block* b) {
  uint i;
  uint end_idx = b->end_idx();
  for (i = 1; i < end_idx; i++) {
    Node* n = b->get_node(i);
    if (!n->is_Phi()) {
      break;
    }
  }
  return i;
}

/*
 * Spills could be inserted before a CreateEx node which should be the first
 * instruction in a block after Phi nodes. If so, move the CreateEx node up.
 */
static void move_exception_node_up(Block* b, uint first_inst, uint last_inst) {
  for (uint i = first_inst; i < last_inst; i++) {
    Node* ex = b->get_node(i);
    if (ex->is_SpillCopy()) {
      continue;
    }

    if (i > first_inst &&
        ex->is_Mach() && ex->as_Mach()->ideal_Opcode() == Op_CreateEx) {
      b->remove_node(i);
      b->insert_node(ex, first_inst);
    }
    // Stop once a CreateEx or any other node is found
    break;
  }
}

/*
 * When a new live range becomes live, raise the register pressure.
 */
void PhaseChaitin::raise_pressure(Block* b, LRG& lrg, Pressure& int_pressure, Pressure& float_pressure) {
  if (lrg.mask_is_nonempty_and_up()) {
    if (lrg.is_float_or_vector()) {
      float_pressure.raise(lrg);
    } else {
      // Do not count the SP and flag registers
      const RegMask& rm = lrg.mask();
      if (rm.overlap(*Matcher::idealreg2regmask[Op_RegI])) {
        int_pressure.raise(lrg);
      }
    }
  }
}


/*
 * Computes the initial register pressure of a block, looking at all live
 * ranges in the liveout. The register pressure is computed for both float
 * and int/pointer registers.
 * Live ranges in the liveout are presumed live for the whole block.
 * We add the cost for the whole block to the area of the live ranges initially.
 * If a live range gets killed in the block, we'll subtract the unused part of
 * the block from the area.
 */
void PhaseChaitin::compute_initial_block_pressure(Block* b, IndexSet* liveout, Pressure& int_pressure, Pressure& float_pressure, double cost) {
  IndexSetIterator elements(liveout);
  uint lid = elements.next();
  while (lid != 0) {
    LRG& lrg = lrgs(lid);
    lrg._area += cost;
    raise_pressure(b, lrg, int_pressure, float_pressure);
    lid = elements.next();
  }
  assert(int_pressure.current_pressure() == count_int_pressure(liveout), "the int pressure is incorrect");
  assert(float_pressure.current_pressure() == count_float_pressure(liveout), "the float pressure is incorrect");
}
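
/*
 * Numeric sketch of the area bookkeeping above (made-up numbers): for a block
 * with frequency 2.0 and 10 instructions between the first non-phi and the
 * block end, every live-out range initially gets 2.0 * 10 = 20.0 added to its
 * area.  If build_ifg_physical() later reaches the range's definition with
 * roughly 4 instructions still above it, it subtracts about 2.0 * 4 = 8.0, so
 * the remaining area only covers the part of the block where the range is
 * actually live.
 */
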
/*
 * Remove dead node if it's not used.
 * We only remove projection nodes if the node "defining" the projection is
 * dead, for example on x86, if we have a dead Add node we remove its
 * RFLAGS node.
 */
bool PhaseChaitin::remove_node_if_not_used(Block* b, uint location, Node* n, uint lid, IndexSet* liveout) {
  Node* def = n->in(0);
  if (!n->is_Proj() ||
      (_lrg_map.live_range_id(def) && !liveout->member(_lrg_map.live_range_id(def)))) {
    if (n->is_MachProj()) {
      // Don't remove KILL projections if their "defining" nodes have
      // memory effects (have SCMemProj projection node) -
      // they are not dead even when their result is not used.
      // For example, compareAndSwapL (and other CAS) and EncodeISOArray nodes.
      // The method add_input_to_liveout() keeps such nodes alive (puts them on
      // the liveout list) when it sees an SCMemProj node in a block.
      // Unfortunately the SCMemProj node could be placed in the block in such
      // an order that the KILL MachProj nodes are processed first.
      if (def->has_out_with(Op_SCMemProj)) {
        return false;
      }
    }
    b->remove_node(location);
    LRG& lrg = lrgs(lid);
    if (lrg._def == n) {
      lrg._def = 0;
    }
    n->disconnect_inputs(NULL, C);
    _cfg.unmap_node_from_block(n);
    n->replace_by(C->top());
    return true;
  }
  return false;
}

/*
 * When encountering a fat projection, we might go from a low to high to low
 * (since the fat proj only lives at this instruction) going backwards in the
 * block. If we find a low to high transition, we record it.
 */
void PhaseChaitin::check_for_high_pressure_transition_at_fatproj(uint& block_reg_pressure, uint location, LRG& lrg, Pressure& pressure, const int op_regtype) {
  RegMask mask_tmp = lrg.mask();
  mask_tmp.AND(*Matcher::idealreg2regmask[op_regtype]);
  pressure.check_pressure_at_fatproj(location, mask_tmp);
}

/*
 * Ensure a high score for immediate-use spill copies so they get a color.
 * All single-use MachSpillCopy(s) that immediately precede their
 * use must color early.  If a longer live range steals their
 * color, the spill copy will split and may push another spill copy
 * further away resulting in an infinite spill-split-retry cycle.
 * Assigning a zero area results in a high score() and a good
 * location in the simplify list.
 */
void PhaseChaitin::assign_high_score_to_immediate_copies(Block* b, Node* n, LRG& lrg, uint next_inst, uint last_inst) {
  if (n->is_SpillCopy() &&
      lrg.is_singledef() &&  // A multi defined live range can still split
      n->outcnt() == 1 &&    // and use must be in this block
      _cfg.get_block_for_node(n->unique_out()) == b) {

    Node* single_use = n->unique_out();
    assert(b->find_node(single_use) >= next_inst, "Use must be later in block");
    // Use can be earlier in block if it is a Phi, but then I should be a MultiDef

    // Find first non SpillCopy 'm' that follows the current instruction
    // (current_inst - 1) is index for current instruction 'n'
    Node* m = n;
    for (uint i = next_inst; i <= last_inst && m->is_SpillCopy(); ++i) {
      m = b->get_node(i);
    }
    if (m == single_use) {
      lrg._area = 0.0;
    }
  }
}

/*
 * Copies do not define a new value and so do not interfere.
 * Remove the copy's source from the liveout set before interfering.
 */
void PhaseChaitin::remove_interference_from_copy(Block* b, uint location, uint lid_copy, IndexSet* liveout, double cost, Pressure& int_pressure, Pressure& float_pressure) {
  if (liveout->remove(lid_copy)) {
    LRG& lrg_copy = lrgs(lid_copy);
    lrg_copy._area -= cost;

    // Lower register pressure since copy and definition can share the same register
    lower_pressure(b, location, lrg_copy, liveout, int_pressure, float_pressure);
  }
}
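
/*
 * Illustrative case for the bound-register handling below (an x86-flavored
 * example, not something this file depends on): a variable shift amount is
 * typically bound to a single register such as RCX.  When that definition is
 * reached, every live range in the liveout that could still be colored with
 * RCX has it stripped from its mask, so the allocator cannot place a neighbor
 * in the one register the bound value must occupy.
 */
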
/*
 * The defined value must go in a particular register. Remove that register from
 * all conflicting parties and avoid the interference.
 */
void PhaseChaitin::remove_bound_register_from_interfering_live_ranges(LRG& lrg, IndexSet* liveout, uint& must_spill) {
  // Check for common case
  const RegMask& rm = lrg.mask();
  int r_size = lrg.num_regs();
  // Smear odd bits
  IndexSetIterator elements(liveout);
  uint l = elements.next();
  while (l != 0) {
    LRG& interfering_lrg = lrgs(l);
    // If 'l' must spill already, do not further hack his bits.
    // He'll get some interferences and be forced to spill later.
    if (interfering_lrg._must_spill) {
      l = elements.next();
      continue;
    }

    // Remove bound register(s) from 'l's choices
    RegMask old = interfering_lrg.mask();
    uint old_size = interfering_lrg.mask_size();

    // Remove the bits from LRG 'rm' from LRG 'l' so 'l' no
    // longer interferes with 'rm'.  If 'l' requires aligned
    // adjacent pairs, subtract out bit pairs.
    assert(!interfering_lrg._is_vector || !interfering_lrg._fat_proj, "sanity");

    if (interfering_lrg.num_regs() > 1 && !interfering_lrg._fat_proj) {
      RegMask r2mask = rm;
      // Leave only aligned set of bits.
      r2mask.smear_to_sets(interfering_lrg.num_regs());
      // It includes vector case.
      interfering_lrg.SUBTRACT(r2mask);
      interfering_lrg.compute_set_mask_size();
    } else if (r_size != 1) {
      // fat proj
      interfering_lrg.SUBTRACT(rm);
      interfering_lrg.compute_set_mask_size();
    } else {
      // Common case: size 1 bound removal
      OptoReg::Name r_reg = rm.find_first_elem();
      if (interfering_lrg.mask().Member(r_reg)) {
        interfering_lrg.Remove(r_reg);
        interfering_lrg.set_mask_size(interfering_lrg.mask().is_AllStack() ? LRG::AllStack_size : old_size - 1);
      }
    }

    // If 'l' goes completely dry, it must spill.
    if (interfering_lrg.not_free()) {
      // Give 'l' some kind of reasonable mask, so it picks up
      // interferences (and will spill later).
      interfering_lrg.set_mask(old);
      interfering_lrg.set_mask_size(old_size);
      must_spill++;
      interfering_lrg._must_spill = 1;
      interfering_lrg.set_reg(OptoReg::Name(LRG::SPILL_REG));
    }
    l = elements.next();
  }
}

/*
 * Start loop at 1 (skip control edge) for most Nodes.  SCMemProj's might be the
 * sole use of a StoreLConditional.  While StoreLConditionals set memory (the
 * SCMemProj use) they also def flags; if that flag def is unused the allocator
 * sees a flag-setting instruction with no use of the flags and assumes it's
 * dead.  This keeps the (useless) flag-setting behavior alive while also
 * keeping the (useful) memory update effect.
 */
void PhaseChaitin::add_input_to_liveout(Block* b, Node* n, IndexSet* liveout, double cost, Pressure& int_pressure, Pressure& float_pressure) {
  JVMState* jvms = n->jvms();
  uint debug_start = jvms ? jvms->debug_start() : 999999;

  for (uint k = ((n->Opcode() == Op_SCMemProj || n->Opcode() == Op_ShenandoahWBMemProj) ? 0:1); k < n->req(); k++) {
    Node* def = n->in(k);
    uint lid = _lrg_map.live_range_id(def);
    if (!lid) {
      continue;
    }
    LRG& lrg = lrgs(lid);

    // No use-side cost for spilling debug info
    if (k < debug_start) {
      // A USE costs twice block frequency (once for the Load, once
      // for a Load-delay).  Rematerialized uses only cost once.
      lrg._cost += (def->rematerialize() ? b->_freq : (b->_freq * 2));
    }

    if (liveout->insert(lid)) {
      // Newly live things assumed live from here to top of block
      lrg._area += cost;
      raise_pressure(b, lrg, int_pressure, float_pressure);
      assert(int_pressure.current_pressure() == count_int_pressure(liveout), "the int pressure is incorrect");
      assert(float_pressure.current_pressure() == count_float_pressure(liveout), "the float pressure is incorrect");
    }
    assert(lrg._area >= 0.0, "negative spill area" );
  }
}

/*
 * If we run off the top of the block with high pressure, just record that the
 * whole block is high pressure.
 * (Even though we might have a transition
 * later down in the block.)
 */
void PhaseChaitin::check_for_high_pressure_block(Pressure& pressure) {
  // current pressure now means the pressure before the first instruction in the block
  // (since we have stepped through all instructions backwards)
  if (pressure.current_pressure() > pressure.high_pressure_limit()) {
    pressure.set_high_pressure_index_to_block_start();
  }
}

/*
 * Compute the high pressure index; avoid landing in the middle of projnodes
 * and set the high pressure index for the block.
 */
void PhaseChaitin::adjust_high_pressure_index(Block* b, uint& block_hrp_index, Pressure& pressure) {
  uint i = pressure.high_pressure_index();
  if (i < b->number_of_nodes() && i < b->end_idx() + 1) {
    Node* cur = b->get_node(i);
    while (cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch()) {
      cur = b->get_node(--i);
    }
  }
  block_hrp_index = i;
}

/* Build an interference graph:
 *   That is, if 2 live ranges are simultaneously alive but their acceptable
 *   register sets do not overlap, then they do not interfere. The IFG is built
 *   by a single reverse pass over each basic block. Starting with the known
 *   live-out set, we remove things that get defined and add things that become
 *   live (essentially executing one pass of a standard LIVE analysis). Just
 *   before a Node defines a value (and removes it from the live-ness set) that
 *   value is certainly live. The defined value interferes with everything
 *   currently live. The value is then removed from the live-ness set and its
 *   inputs are added to the live-ness set.
 * Compute register pressure for each block:
 *   We store the biggest register pressure for each block and also the first
 *   low to high register pressure transition within the block (if any).
 */
uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
  Compile::TracePhase tp("buildIFG", &timers[_t_buildIFGphysical]);

  uint must_spill = 0;
  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
    Block* block = _cfg.get_block(i);

    // Clone (rather than smash in place) the liveout info, so it is alive
    // for the "collect_gc_info" phase later.
    IndexSet liveout(_live->live(block));

    uint first_inst = first_nonphi_index(block);
    uint last_inst = block->end_idx();

    move_exception_node_up(block, first_inst, last_inst);

    Pressure int_pressure(last_inst + 1, INTPRESSURE);
    Pressure float_pressure(last_inst + 1, FLOATPRESSURE);
    block->_reg_pressure = 0;
    block->_freg_pressure = 0;

    int inst_count = last_inst - first_inst;
    double cost = (inst_count <= 0) ? 0.0 : block->_freq * double(inst_count);
    assert(cost >= 0.0, "negative spill cost" );

    compute_initial_block_pressure(block, &liveout, int_pressure, float_pressure, cost);

    for (uint location = last_inst; location > 0; location--) {
      Node* n = block->get_node(location);
      uint lid = _lrg_map.live_range_id(n);

      if(lid) {
        LRG& lrg = lrgs(lid);

        // A DEF normally costs block frequency; rematerialized values are
        // removed from the DEF site, so LOWER costs here.
        lrg._cost += n->rematerialize() ?
                     0 : block->_freq;

        if (!liveout.member(lid) && n->Opcode() != Op_SafePoint) {
          if (remove_node_if_not_used(block, location, n, lid, &liveout)) {
            float_pressure.lower_high_pressure_index();
            int_pressure.lower_high_pressure_index();
            continue;
          }
          if (lrg._fat_proj) {
            check_for_high_pressure_transition_at_fatproj(block->_reg_pressure, location, lrg, int_pressure, Op_RegI);
            check_for_high_pressure_transition_at_fatproj(block->_freg_pressure, location, lrg, float_pressure, Op_RegD);
          }
        } else {
          // A live range ends at its definition, remove the remaining area.
          // If the cost is +Inf (which might happen in extreme cases), the lrg area will also be +Inf,
          // and +Inf - +Inf = NaN. So let's not do that subtraction.
          if (g_isfinite(cost)) {
            lrg._area -= cost;
          }
          assert(lrg._area >= 0.0, "negative spill area" );

          assign_high_score_to_immediate_copies(block, n, lrg, location + 1, last_inst);

          if (liveout.remove(lid)) {
            lower_pressure(block, location, lrg, &liveout, int_pressure, float_pressure);
          }
          uint copy_idx = n->is_Copy();
          if (copy_idx) {
            uint lid_copy = _lrg_map.live_range_id(n->in(copy_idx));
            remove_interference_from_copy(block, location, lid_copy, &liveout, cost, int_pressure, float_pressure);
          }
        }

        // Since rematerializable DEFs are not bound but the live range is,
        // some uses must be bound.  If we spill live range 'r', it can
        // rematerialize at each use site according to its bindings.
        if (lrg.is_bound() && !n->rematerialize() && lrg.mask().is_NotEmpty()) {
          remove_bound_register_from_interfering_live_ranges(lrg, &liveout, must_spill);
        }
        interfere_with_live(lid, &liveout);
      }

      // Area remaining in the block
      inst_count--;
      cost = (inst_count <= 0) ? 0.0 : block->_freq * double(inst_count);

      if (!n->is_Phi()) {
        add_input_to_liveout(block, n, &liveout, cost, int_pressure, float_pressure);
      }
    }

    check_for_high_pressure_block(int_pressure);
    check_for_high_pressure_block(float_pressure);
    adjust_high_pressure_index(block, block->_ihrp_index, int_pressure);
    adjust_high_pressure_index(block, block->_fhrp_index, float_pressure);
    // set the final_pressure as the register pressure for the block
    block->_reg_pressure = int_pressure.final_pressure();
    block->_freg_pressure = float_pressure.final_pressure();

#ifndef PRODUCT
    // Gather Register Pressure Statistics
    if (PrintOptoStatistics) {
      if (block->_reg_pressure > int_pressure.high_pressure_limit() || block->_freg_pressure > float_pressure.high_pressure_limit()) {
        _high_pressure++;
      } else {
        _low_pressure++;
      }
    }
#endif
  }

  return must_spill;
}