/*
 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/chaitin.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"

//------------------------------Split--------------------------------------
// Walk the graph in RPO and for each lrg which spills, propagate reaching
// definitions.  During propagation, split the live range around regions of
// High Register Pressure (HRP).  If a Def is in a region of Low Register
// Pressure (LRP), it will not get spilled until we encounter a region of
// HRP between it and one of its uses.  We will spill at the transition
// point between LRP and HRP.  Uses in the HRP region will use the spilled
// Def.  The first Use outside the HRP region will generate a SpillCopy to
// hoist the live range back up into a register, and all subsequent uses
// will use that new Def until another HRP region is encountered.  Defs in
// HRP regions will get trailing SpillCopies to push the LRG down into the
// stack immediately.
//
// As a side effect, unlink from (hence make dead) coalesced copies.
//

static const char out_of_nodes[] = "out of nodes during split";

static bool contains_no_live_range_input(const Node* def) {
  for (uint i = 1; i < def->req(); ++i) {
    if (def->in(i) != NULL && def->in_RegMask(i).is_NotEmpty()) {
      return false;
    }
  }
  return true;
}

//------------------------------get_spillcopy_wide-----------------------------
// Get a SpillCopy node with wide-enough masks.  Use the 'wide-mask', the
// wide ideal-register spill-mask if possible.  If the 'wide-mask' does
// not cover the input (or output), use the input (or output) mask instead.
Node *PhaseChaitin::get_spillcopy_wide( Node *def, Node *use, uint uidx ) {
  // If ideal reg doesn't exist we've got a bad schedule happening
  // that is forcing us to spill something that isn't spillable.
  // Bail rather than abort
  int ireg = def->ideal_reg();
  if( ireg == 0 || ireg == Op_RegFlags ) {
    assert(false, "attempted to spill a non-spillable item");
    C->record_method_not_compilable("attempted to spill a non-spillable item");
    return NULL;
  }
  if (C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {
    return NULL;
  }
  const RegMask *i_mask = &def->out_RegMask();
  const RegMask *w_mask = C->matcher()->idealreg2spillmask[ireg];
  const RegMask *o_mask = use ? &use->in_RegMask(uidx) : w_mask;
  const RegMask *w_i_mask = w_mask->overlap( *i_mask ) ? w_mask : i_mask;
  const RegMask *w_o_mask;

  int num_regs = RegMask::num_registers(ireg);
  bool is_vect = RegMask::is_vector(ireg);
  if( w_mask->overlap( *o_mask ) &&   // Overlap AND
      ((num_regs == 1)                // Single use or aligned
        || is_vect                    // or vector
        || !is_vect && o_mask->is_aligned_pairs()) ) {
    assert(!is_vect || o_mask->is_aligned_sets(num_regs), "vectors are aligned");
    // Don't come here for mis-aligned doubles
    w_o_mask = w_mask;
  } else {                      // wide ideal mask does not overlap with o_mask
    // Mis-aligned doubles come here and XMM->FPR moves on x86.
    w_o_mask = o_mask;          // Must target desired registers
    // Does the ideal-reg-mask overlap with o_mask?  I.e., can I use
    // a reg-reg move or do I need a trip across register classes
    // (and thus through memory)?
    if( !C->matcher()->idealreg2regmask[ireg]->overlap( *o_mask) && o_mask->is_UP() )
      // Here we assume a trip through memory is required.
      w_i_mask = &C->FIRST_STACK_mask();
  }
  return new (C) MachSpillCopyNode( def, *w_i_mask, *w_o_mask );
}

//------------------------------insert_proj------------------------------------
// Insert the spill at chosen location.  Skip over any intervening Proj's or
// Phis.  Skip over a CatchNode and projs, inserting in the fall-through block
// instead.  Update high-pressure indices.  Create a new live range.
void PhaseChaitin::insert_proj( Block *b, uint i, Node *spill, uint maxlrg ) {
  // Skip intervening ProjNodes.  Do not insert between a ProjNode and
  // its definer.
  while( i < b->_nodes.size() &&
         (b->_nodes[i]->is_Proj() ||
          b->_nodes[i]->is_Phi() ) )
    i++;

  // Do not insert between a call and its Catch
  if( b->_nodes[i]->is_Catch() ) {
    // Put the instruction at the top of the fall-thru block.
    // Find the fall-thru projection
    while( 1 ) {
      const CatchProjNode *cp = b->_nodes[++i]->as_CatchProj();
      if( cp->_con == CatchProjNode::fall_through_index )
        break;
    }
    int sidx = i - b->end_idx()-1;
    b = b->_succs[sidx];        // Switch to successor block
    i = 1;                      // Right at start of block
  }

  b->_nodes.insert(i,spill);    // Insert node in block
  _cfg._bbs.map(spill->_idx,b); // Update node->block mapping to reflect the insertion
  // Adjust the point where we go hi-pressure
  if( i <= b->_ihrp_index ) b->_ihrp_index++;
  if( i <= b->_fhrp_index ) b->_fhrp_index++;

  // Assign a new Live Range Number to the SpillCopy and grow
  // the node->live range mapping.
  new_lrg(spill,maxlrg);
}

//------------------------------split_DEF--------------------------------------
// There are four categories of Split; UP/DOWN x DEF/USE
// Only three of these really occur as DOWN/USE will always color.
// Any Split with a DEF cannot CISC-Spill now.  Thus we need
// two helper routines, one for Split DEFS (insert after instruction),
// one for Split USES (insert before instruction).  DEF insertion
// happens inside Split, where the Leaveblock array is updated.
uint PhaseChaitin::split_DEF( Node *def, Block *b, int loc, uint maxlrg, Node **Reachblock, Node **debug_defs, GrowableArray<uint> splits, int slidx ) {
#ifdef ASSERT
  // Increment the counter for this lrg
  splits.at_put(slidx, splits.at(slidx)+1);
#endif
  // If we are spilling the memory op for an implicit null check, at the
  // null check location (ie - null check is in HRP block) we need to do
  // the null-check first, then spill-down in the following block.
  // (The implicit_null_check function ensures the use is also dominated
  // by the branch-not-taken block.)
  Node *be = b->end();
  if( be->is_MachNullCheck() && be->in(1) == def && def == b->_nodes[loc] ) {
    // Spill goes in the branch-not-taken block
    b = b->_succs[b->_nodes[b->end_idx()+1]->Opcode() == Op_IfTrue];
    loc = 0;                    // Just past the Region
  }
  assert( loc >= 0, "must insert past block head" );

  // Get a def-side SpillCopy
  Node *spill = get_spillcopy_wide(def,NULL,0);
  // Did we fail to split?  Then bail
  if (!spill) {
    return 0;
  }

  // Insert the spill at chosen location
  insert_proj( b, loc+1, spill, maxlrg++);

  // Insert new node into Reaches array
  Reachblock[slidx] = spill;
  // Update debug list of reaching down definitions by adding this one
  debug_defs[slidx] = spill;

  // return updated count of live ranges
  return maxlrg;
}

//------------------------------split_USE--------------------------------------
// Splits at uses can involve redefining the LRG, so no CISC Spilling there.
// Debug uses want to know if def is already stack enabled.
uint PhaseChaitin::split_USE( Node *def, Block *b, Node *use, uint useidx, uint maxlrg, bool def_down, bool cisc_sp, GrowableArray<uint> splits, int slidx ) {
#ifdef ASSERT
  // Increment the counter for this lrg
  splits.at_put(slidx, splits.at(slidx)+1);
#endif

  // Some setup stuff for handling debug node uses
  JVMState* jvms = use->jvms();
  uint debug_start = jvms ? jvms->debug_start() : 999999;
  uint debug_end   = jvms ? jvms->debug_end()   : 999999;

  //-------------------------------------------
  // Check for use of debug info
  if (useidx >= debug_start && useidx < debug_end) {
    // Actually it's perfectly legal for constant debug info to appear,
    // just unlikely.  In this case the optimizer left a ConI of a 4
    // as both inputs to a Phi with only a debug use.  It's a single-def
    // live range of a rematerializable value.  The live range spills,
    // rematerializes and now the ConI directly feeds into the debug info.
    // assert(!def->is_Con(), "constant debug info already constructed directly");

    // Special split handling for Debug Info
    // If DEF is DOWN, just hook the edge and return
    // If DEF is UP, Split it DOWN for this USE.
    if( def->is_Mach() ) {
      if( def_down ) {
        // DEF is DOWN, so connect USE directly to the DEF
        use->set_req(useidx, def);
      } else {
        // Block and index where the use occurs.
        Block *b = _cfg._bbs[use->_idx];
        // Put the clone just prior to use
        int bindex = b->find_node(use);
        // DEF is UP, so must copy it DOWN and hook in USE
        // Insert SpillCopy before the USE, which uses DEF as its input,
        // and defs a new live range, which is used by this node.
        Node *spill = get_spillcopy_wide(def,use,useidx);
        // Did we fail to split?
        if (!spill) {
          // Bail
          return 0;
        }
        // insert into basic block
        insert_proj( b, bindex, spill, maxlrg++ );
        // Use the new split
        use->set_req(useidx,spill);
      }
      // No further split handling needed for this use
      return maxlrg;
    }  // End special splitting for debug info live range
  }  // If debug info

  // CISC-SPILLING
  // Finally, check to see if USE is CISC-Spillable, and if so,
  // gather_lrg_masks will add the flags bit to its mask, and
  // no use side copy is needed.  This frees up the live range
  // register choices without causing copy coalescing, etc.
  if( UseCISCSpill && cisc_sp ) {
    int inp = use->cisc_operand();
    if( inp != AdlcVMDeps::Not_cisc_spillable )
      // Convert operand number to edge index number
      inp = use->as_Mach()->operand_index(inp);
    if( inp == (int)useidx ) {
      use->set_req(useidx, def);
#ifndef PRODUCT
      if( TraceCISCSpill ) {
        tty->print("  set_split: ");
        use->dump();
      }
#endif
      return maxlrg;
    }
  }

  //-------------------------------------------
  // Insert a Copy before the use

  // Block and index where the use occurs.
  int bindex;
  // Phi input spill-copies belong at the end of the prior block
  if( use->is_Phi() ) {
    b = _cfg._bbs[b->pred(useidx)->_idx];
    bindex = b->end_idx();
  } else {
    // Put the clone just prior to use
    bindex = b->find_node(use);
  }

  Node *spill = get_spillcopy_wide( def, use, useidx );
  if( !spill ) return 0;        // Bailed out
  // Insert SpillCopy before the USE, which uses the reaching DEF as
  // its input, and defs a new live range, which is used by this node.
  insert_proj( b, bindex, spill, maxlrg++ );
  // Use the spill/clone
  use->set_req(useidx,spill);

  // return updated live range count
  return maxlrg;
}

//------------------------------clone_node----------------------------
// Clone node with anti dependence check.
Node* clone_node(Node* def, Block *b, Compile* C) {
  if (def->needs_anti_dependence_check()) {
#ifdef ASSERT
    if (Verbose) {
      tty->print_cr("RA attempts to clone node with anti_dependence:");
      def->dump(-1); tty->cr();
      tty->print_cr("into block:");
      b->dump();
    }
#endif
    if (C->subsume_loads() == true && !C->failing()) {
      // Retry with subsume_loads == false
      // If this is the first failure, the sentinel string will "stick"
      // to the Compile object, and the C2Compiler will see it and retry.
      C->record_failure(C2Compiler::retry_no_subsuming_loads());
    } else {
      // Bailout without retry
      C->record_method_not_compilable("RA Split failed: attempt to clone node with anti_dependence");
    }
    return 0;
  }
  return def->clone();
}

//------------------------------split_Rematerialize----------------------------
// Clone a local copy of the def.
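// (Rematerialization re-issues the defining instruction next to each use
// instead of spilling through memory; for a cheap single-def value, say a
// constant load, the recomputation is cheaper than a store/load pair.)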
Node *PhaseChaitin::split_Rematerialize( Node *def, Block *b, uint insidx, uint &maxlrg, GrowableArray<uint> splits, int slidx, uint *lrg2reach, Node **Reachblock, bool walkThru ) {
  // The input live ranges will be stretched to the site of the new
  // instruction.  They might be stretched past a def and will thus
  // have the old and new values of the same live range alive at the
  // same time - a definite no-no.  Split out private copies of
  // the inputs.
  if( def->req() > 1 ) {
    for( uint i = 1; i < def->req(); i++ ) {
      Node *in = def->in(i);
      // Check for single-def (LRG cannot be redefined)
      uint lidx = _lrg_map.live_range_id(in);
      if (lidx >= _lrg_map.max_lrg_id()) {
        continue;               // Value is a recent spill-copy
      }
      if (lrgs(lidx).is_singledef()) {
        continue;
      }

      Block *b_def = _cfg._bbs[def->_idx];
      int idx_def = b_def->find_node(def);
      Node *in_spill = get_spillcopy_wide( in, def, i );
      if( !in_spill ) return 0; // Bailed out
      insert_proj(b_def,idx_def,in_spill,maxlrg++);
      if( b_def == b )
        insidx++;
      def->set_req(i,in_spill);
    }
  }

  Node *spill = clone_node(def, b, C);
  if (spill == NULL || C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {
    // Check when generating nodes
    return 0;
  }

  // See if any inputs are currently being spilled, and take the
  // latest copy of spilled inputs.
  if( spill->req() > 1 ) {
    for( uint i = 1; i < spill->req(); i++ ) {
      Node *in = spill->in(i);
      uint lidx = _lrg_map.find_id(in);

      // Walk backwards thru spill copy node intermediates
      if (walkThru) {
        while (in->is_SpillCopy() && lidx >= _lrg_map.max_lrg_id()) {
          in = in->in(1);
          lidx = _lrg_map.find_id(in);
        }

        if (lidx < _lrg_map.max_lrg_id() && lrgs(lidx).is_multidef()) {
          // walkThru found a multidef LRG, which is unsafe to use, so
          // just keep the original def used in the clone.
          in = spill->in(i);
          lidx = _lrg_map.find_id(in);
        }
      }

      if (lidx < _lrg_map.max_lrg_id() && lrgs(lidx).reg() >= LRG::SPILL_REG) {
        Node *rdef = Reachblock[lrg2reach[lidx]];
        if (rdef) {
          spill->set_req(i, rdef);
        }
      }
    }
  }


  assert( spill->out_RegMask().is_UP(), "rematerialize to a reg" );
  // Rematerialized op is def->spilled+1
  set_was_spilled(spill);
  if( _spilled_once.test(def->_idx) )
    set_was_spilled(spill);

  insert_proj( b, insidx, spill, maxlrg++ );
#ifdef ASSERT
  // Increment the counter for this lrg
  splits.at_put(slidx, splits.at(slidx)+1);
#endif
  // See if the cloned def kills any flags, and copy those kills as well
  uint i = insidx+1;
  if( clone_projs( b, i, def, spill, maxlrg) ) {
    // Adjust the point where we go hi-pressure
    if( i <= b->_ihrp_index ) b->_ihrp_index++;
    if( i <= b->_fhrp_index ) b->_fhrp_index++;
  }

  return spill;
}

//------------------------------is_high_pressure-------------------------------
// Function to compute whether or not this live range is "high pressure"
// in this block - whether it spills eagerly or not.
bool PhaseChaitin::is_high_pressure( Block *b, LRG *lrg, uint insidx ) {
  if( lrg->_was_spilled1 ) return true;
  // Forced spilling due to conflict?  Then split only at binding uses
  // or defs, not for supposed capacity problems.
  // CNC - Turned off 7/8/99, causes too much spilling
  // if( lrg->_is_bound ) return false;

  // Use float pressure numbers for vectors.
  bool is_float_or_vector = lrg->_is_float || lrg->_is_vector;
  // Not yet reached the high-pressure cutoff point, so low pressure
  uint hrp_idx = is_float_or_vector ? b->_fhrp_index : b->_ihrp_index;
  if( insidx < hrp_idx ) return false;
  // Register pressure for the block as a whole depends on reg class
  int block_pres = is_float_or_vector ? b->_freg_pressure : b->_reg_pressure;
  // Bound live ranges will split at the binding points first;
  // Intermediate splits should assume the live range's register set
  // got "freed up" and that num_regs will become INT_PRESSURE.
  int bound_pres = is_float_or_vector ? FLOATPRESSURE : INTPRESSURE;
  // Effective register pressure limit.
  int lrg_pres = (lrg->get_invalid_mask_size() > lrg->num_regs())
    ? (lrg->get_invalid_mask_size() >> (lrg->num_regs()-1)) : bound_pres;
  // High pressure if block pressure requires more register freedom
  // than live range has.
  return block_pres >= lrg_pres;
}


//------------------------------prompt_use---------------------------------
// True if lidx is used before any real register is def'd in the block
bool PhaseChaitin::prompt_use( Block *b, uint lidx ) {
  if (lrgs(lidx)._was_spilled2) {
    return false;
  }

  // Scan block for 1st use.
  for( uint i = 1; i <= b->end_idx(); i++ ) {
    Node *n = b->_nodes[i];
    // Ignore PHI use, these can be up or down
    if (n->is_Phi()) {
      continue;
    }
    for (uint j = 1; j < n->req(); j++) {
      if (_lrg_map.find_id(n->in(j)) == lidx) {
        return true;            // Found 1st use!
      }
    }
    if (n->out_RegMask().is_NotEmpty()) {
      return false;
    }
  }
  return false;
}

//------------------------------Split--------------------------------------
//----------Split Routine----------
// ***** NEW SPLITTING HEURISTIC *****
// DEFS: If the DEF is in a High Register Pressure(HRP) Block, split there.
//       Else, no split unless there is a HRP block between a DEF and
//       one of its uses, and then split at the HRP block.
//
// USES: If USE is in HRP, split at use to leave main LRG on stack.
//       Else, hoist LRG back up to register only (ie - split is also DEF)
// We will compute a new maxlrg as we go
uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
  NOT_PRODUCT( Compile::TracePhase t3("regAllocSplit", &_t_regAllocSplit, TimeCompiler); )

  // Free thread local resources used by this method on exit.
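  // (A ResourceMark scoped to split_arena releases, when this method
  // returns, everything the passes below allocate from that arena.)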
  ResourceMark rm(split_arena);

  uint                 bidx, pidx, slidx, insidx, inpidx, twoidx;
  uint                 non_phi = 1, spill_cnt = 0;
  Node               **Reachblock;
  Node                *n1, *n2, *n3;
  Node_List           *defs,*phis;
  bool                *UPblock;
  bool                 u1, u2, u3;
  Block               *b, *pred;
  PhiNode             *phi;
  GrowableArray<uint>  lidxs(split_arena, maxlrg, 0, 0);

  // Array of counters to count splits per live range
  GrowableArray<uint> splits(split_arena, maxlrg, 0, 0);

#define NEW_SPLIT_ARRAY(type, size)\
  (type*) split_arena->allocate_bytes((size) * sizeof(type))

  //----------Setup Code----------
  // Create a convenient mapping from lrg numbers to reaches/leaves indices
  uint *lrg2reach = NEW_SPLIT_ARRAY(uint, maxlrg);
  // Keep track of DEFS & Phis for later passes
  defs = new Node_List();
  phis = new Node_List();
  // Gather info on which LRG's are spilling, and build maps
  for (bidx = 1; bidx < maxlrg; bidx++) {
    if (lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG) {
      assert(!lrgs(bidx).mask().is_AllStack(),"AllStack should color");
      lrg2reach[bidx] = spill_cnt;
      spill_cnt++;
      lidxs.append(bidx);
#ifdef ASSERT
      // Initialize the split counts to zero
      splits.append(0);
#endif
#ifndef PRODUCT
      if( PrintOpto && WizardMode && lrgs(bidx)._was_spilled1 )
        tty->print_cr("Warning, 2nd spill of L%d",bidx);
#endif
    }
  }

  // Create side arrays for propagating reaching defs info.
  // Each block needs a node pointer for each spilling live range for the
  // Def which is live into the block.  Phi nodes handle multiple input
  // Defs by querying the output of their predecessor blocks and resolving
  // them to a single Def at the phi.  The pointer is updated for each
  // Def in the block, and then becomes the output for the block when
  // processing of the block is complete.  We also need to track whether
  // a Def is UP or DOWN.  UP means that it should get a register (ie -
  // it is always in LRP regions), and DOWN means that it is probably
  // on the stack (ie - it crosses HRP regions).
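  // Conceptually the two tables below are indexed as
  //   Reaches[block_pre_order][spill_lrg_index] -> reaching Def (or NULL)
  //   UP     [block_pre_order][spill_lrg_index] -> Def expected in a register?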
  Node ***Reaches     = NEW_SPLIT_ARRAY( Node**, _cfg._num_blocks+1 );
  bool  **UP          = NEW_SPLIT_ARRAY( bool*, _cfg._num_blocks+1 );
  Node  **debug_defs  = NEW_SPLIT_ARRAY( Node*, spill_cnt );
  VectorSet **UP_entry= NEW_SPLIT_ARRAY( VectorSet*, spill_cnt );

  // Initialize Reaches & UP
  for( bidx = 0; bidx < _cfg._num_blocks+1; bidx++ ) {
    Reaches[bidx]     = NEW_SPLIT_ARRAY( Node*, spill_cnt );
    UP[bidx]          = NEW_SPLIT_ARRAY( bool, spill_cnt );
    Node **Reachblock = Reaches[bidx];
    bool  *UPblock    = UP[bidx];
    for( slidx = 0; slidx < spill_cnt; slidx++ ) {
      UPblock[slidx] = true;     // Assume they start in registers
      Reachblock[slidx] = NULL;  // Assume that no def is present
    }
  }

#undef NEW_SPLIT_ARRAY

  // Initialize to array of empty vectorsets
  for( slidx = 0; slidx < spill_cnt; slidx++ )
    UP_entry[slidx] = new VectorSet(split_arena);

  //----------PASS 1----------
  //----------Propagation & Node Insertion Code----------
  // Walk the Blocks in RPO for DEF & USE info
  for( bidx = 0; bidx < _cfg._num_blocks; bidx++ ) {

    if (C->check_node_count(spill_cnt, out_of_nodes)) {
      return 0;
    }

    b  = _cfg._blocks[bidx];
    // Reaches & UP arrays for this block
    Reachblock = Reaches[b->_pre_order];
    UPblock    = UP[b->_pre_order];
    // Reset counter of start of non-Phi nodes in block
    non_phi = 1;
    //----------Block Entry Handling----------
    // Check for need to insert a new phi
    // Cycle through this block's predecessors, collecting Reaches
    // info for each spilled LRG.  If they are identical, no phi is
    // needed.  If they differ, check for a phi, and insert if missing,
    // or update edges if present.  Set current block's Reaches set to
    // be either the phi's or the reaching def, as appropriate.
    // If no Phi is needed, check if the LRG needs to spill on entry
    // to the block due to HRP.
    for( slidx = 0; slidx < spill_cnt; slidx++ ) {
      // Grab the live range number
      uint lidx = lidxs.at(slidx);
      // Do not bother splitting or putting in Phis for single-def
      // rematerialized live ranges.  This happens a lot to constants
      // with long live ranges.
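      // (For example, a constant load feeding uses in many blocks: rather
      // than propagate it here, each use can simply clone the constant.)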
      if( lrgs(lidx).is_singledef() &&
          lrgs(lidx)._def->rematerialize() ) {
        // reset the Reaches & UP entries
        Reachblock[slidx] = lrgs(lidx)._def;
        UPblock[slidx] = true;
        // Record following instruction in case 'n' rematerializes and
        // kills flags
        Block *pred1 = _cfg._bbs[b->pred(1)->_idx];
        continue;
      }

      // Initialize needs_phi and needs_split
      bool needs_phi = false;
      bool needs_split = false;
      bool has_phi = false;
      // Walk the predecessor blocks to check inputs for that live range
      // Grab predecessor block header
      n1 = b->pred(1);
      // Grab the appropriate reaching def info for inpidx
      pred = _cfg._bbs[n1->_idx];
      pidx = pred->_pre_order;
      Node **Ltmp = Reaches[pidx];
      bool  *Utmp = UP[pidx];
      n1 = Ltmp[slidx];
      u1 = Utmp[slidx];
      // Initialize node for saving type info
      n3 = n1;
      u3 = u1;

      // Compare inputs to see if a Phi is needed
      for( inpidx = 2; inpidx < b->num_preds(); inpidx++ ) {
        // Grab predecessor block headers
        n2 = b->pred(inpidx);
        // Grab the appropriate reaching def info for inpidx
        pred = _cfg._bbs[n2->_idx];
        pidx = pred->_pre_order;
        Ltmp = Reaches[pidx];
        Utmp = UP[pidx];
        n2 = Ltmp[slidx];
        u2 = Utmp[slidx];
        // For each LRG, decide if a phi is necessary
        if( n1 != n2 ) {
          needs_phi = true;
        }
        // See if the phi has mismatched inputs, UP vs. DOWN
        if( n1 && n2 && (u1 != u2) ) {
          needs_split = true;
        }
        // Move n2/u2 to n1/u1 for next iteration
        n1 = n2;
        u1 = u2;
        // Preserve a non-NULL predecessor for later type referencing
        if( (n3 == NULL) && (n2 != NULL) ){
          n3 = n2;
          u3 = u2;
        }
      }  // End for all potential Phi inputs

      // check block for appropriate phinode & update edges
      for( insidx = 1; insidx <= b->end_idx(); insidx++ ) {
        n1 = b->_nodes[insidx];
        // bail if this is not a phi
        phi = n1->is_Phi() ? n1->as_Phi() : NULL;
        if( phi == NULL ) {
          // Keep track of index of first non-PhiNode instruction in block
          non_phi = insidx;
          // break out of the for loop as we have handled all phi nodes
          break;
        }
        // must be looking at a phi
        if (_lrg_map.find_id(n1) == lidxs.at(slidx)) {
          // found the necessary phi
          needs_phi = false;
          has_phi = true;
          // initialize the Reaches entry for this LRG
          Reachblock[slidx] = phi;
          break;
        }  // end if found correct phi
      }  // end for all phi's

      // If a phi is needed or exists, check for it
      if( needs_phi || has_phi ) {
        // add new phinode if one not already found
        if( needs_phi ) {
          // create a new phi node and insert it into the block
          // type is taken from left over pointer to a predecessor
          assert(n3,"No non-NULL reaching DEF for a Phi");
          phi = new (C) PhiNode(b->head(), n3->bottom_type());
          // initialize the Reaches entry for this LRG
          Reachblock[slidx] = phi;

          // add node to block & node_to_block mapping
          insert_proj(b, insidx++, phi, maxlrg++);
          non_phi++;
          // Reset new phi's mapping to be the spilling live range
          _lrg_map.map(phi->_idx, lidx);
          assert(_lrg_map.find_id(phi) == lidx, "Bad update on Union-Find mapping");
        }  // end if not found correct phi
        // Here you have either found or created the Phi, so record it
        assert(phi != NULL,"Must have a Phi Node here");
        phis->push(phi);
        // PhiNodes should either force the LRG UP or DOWN depending
        // on its inputs and the register pressure in the Phi's block.
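        // The decision below: assume UP first, then force DOWN when the
        // block is high-pressure with no prompt use, or when every input
        // already arrived DOWN and no UP/DOWN mismatch forces a split.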
        UPblock[slidx] = true;  // Assume new DEF is UP
        // If entering a high-pressure area with no immediate use,
        // assume Phi is DOWN
        if( is_high_pressure( b, &lrgs(lidx), b->end_idx()) && !prompt_use(b,lidx) )
          UPblock[slidx] = false;
        // If we are not split up/down and all inputs are down, then we
        // are down
        if( !needs_split && !u3 )
          UPblock[slidx] = false;
      }  // end if phi is needed

      // Do not need a phi, so grab the reaching DEF
      else {
        // Grab predecessor block header
        n1 = b->pred(1);
        // Grab the appropriate reaching def info for k
        pred = _cfg._bbs[n1->_idx];
        pidx = pred->_pre_order;
        Node **Ltmp = Reaches[pidx];
        bool  *Utmp = UP[pidx];
        // reset the Reaches & UP entries
        Reachblock[slidx] = Ltmp[slidx];
        UPblock[slidx] = Utmp[slidx];
      }  // end else no Phi is needed
    }  // end for all spilling live ranges
    // DEBUG
#ifndef PRODUCT
    if(trace_spilling()) {
      tty->print("\nBlock %d: ", b->_pre_order);
      tty->print("Reaching Definitions after Phi handling\n");
      for( uint x = 0; x < spill_cnt; x++ ) {
        tty->print("Spill Idx %d: UP %d: Node\n",x,UPblock[x]);
        if( Reachblock[x] )
          Reachblock[x]->dump();
        else
          tty->print("Undefined\n");
      }
    }
#endif

    //----------Non-Phi Node Splitting----------
    // Since phi-nodes have now been handled, the Reachblock array for this
    // block is initialized with the correct starting value for the defs which
    // reach non-phi instructions in this block.  Thus, process non-phi
    // instructions normally, inserting SpillCopy nodes for all spill
    // locations.

    // Memoize any DOWN reaching definitions for use as DEBUG info
    for( insidx = 0; insidx < spill_cnt; insidx++ ) {
      debug_defs[insidx] = (UPblock[insidx]) ? NULL : Reachblock[insidx];
      if( UPblock[insidx] )     // Memoize UP decision at block start
        UP_entry[insidx]->set( b->_pre_order );
    }

    //----------Walk Instructions in the Block and Split----------
    // For all non-phi instructions in the block
    for( insidx = 1; insidx <= b->end_idx(); insidx++ ) {
      Node *n = b->_nodes[insidx];
      // Find the defining Node's live range index
      uint defidx = _lrg_map.find_id(n);
      uint cnt = n->req();

      if (n->is_Phi()) {
        // Skip phi nodes after removing dead copies.
        if (defidx < _lrg_map.max_lrg_id()) {
          // Check for useless Phis.  These appear if we spill, then
          // coalesce away copies.  Don't touch Phis in spilling live
          // ranges; they are busy getting modified in this pass.
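          // (A useless Phi here is one whose inputs all collapsed to a
          // single unique value, e.g. Phi(x, x) after a copy was coalesced
          // away; it can be replaced outright by that value.)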
          if( lrgs(defidx).reg() < LRG::SPILL_REG ) {
            uint i;
            Node *u = NULL;
            // Look for the Phi merging 2 unique inputs
            for( i = 1; i < cnt; i++ ) {
              // Ignore repeats and self
              if( n->in(i) != u && n->in(i) != n ) {
                // Found a unique input
                if( u != NULL ) // If it's the 2nd, bail out
                  break;
                u = n->in(i);   // Else record it
              }
            }
            assert( u, "at least 1 valid input expected" );
            if (i >= cnt) {     // Found one unique input
              assert(_lrg_map.find_id(n) == _lrg_map.find_id(u), "should be the same lrg");
              n->replace_by(u); // Then replace with unique input
              n->disconnect_inputs(NULL, C);
              b->_nodes.remove(insidx);
              insidx--;
              b->_ihrp_index--;
              b->_fhrp_index--;
            }
          }
        }
        continue;
      }
      assert( insidx > b->_ihrp_index ||
              (b->_reg_pressure < (uint)INTPRESSURE) ||
              b->_ihrp_index > 4000000 ||
              b->_ihrp_index >= b->end_idx() ||
              !b->_nodes[b->_ihrp_index]->is_Proj(), "" );
      assert( insidx > b->_fhrp_index ||
              (b->_freg_pressure < (uint)FLOATPRESSURE) ||
              b->_fhrp_index > 4000000 ||
              b->_fhrp_index >= b->end_idx() ||
              !b->_nodes[b->_fhrp_index]->is_Proj(), "" );

      // ********** Handle Crossing HRP Boundary **********
      if( (insidx == b->_ihrp_index) || (insidx == b->_fhrp_index) ) {
        for( slidx = 0; slidx < spill_cnt; slidx++ ) {
          // Check for need to split at HRP boundary - split if UP
          n1 = Reachblock[slidx];
          // bail out if no reaching DEF
          if( n1 == NULL ) continue;
          // bail out if live range is 'isolated' around inner loop
          uint lidx = lidxs.at(slidx);
          // If live range is currently UP
          if( UPblock[slidx] ) {
            // set location to insert spills at
            // SPLIT DOWN HERE - NO CISC SPILL
            if( is_high_pressure( b, &lrgs(lidx), insidx ) &&
                !n1->rematerialize() ) {
              // If there is already a valid stack definition available, use it
              if( debug_defs[slidx] != NULL ) {
                Reachblock[slidx] = debug_defs[slidx];
              }
              else {
                // Insert point is just past last use or def in the block
                int insert_point = insidx-1;
                while( insert_point > 0 ) {
                  Node *n = b->_nodes[insert_point];
                  // Hit top of block?  Quit going backwards
                  if (n->is_Phi()) {
                    break;
                  }
                  // Found a def?  Better split after it.
                  if (_lrg_map.live_range_id(n) == lidx) {
                    break;
                  }
                  // Look for a use
                  uint i;
                  for( i = 1; i < n->req(); i++ ) {
                    if (_lrg_map.live_range_id(n->in(i)) == lidx) {
                      break;
                    }
                  }
                  // Found a use?  Better split after it.
                  if (i < n->req()) {
                    break;
                  }
                  insert_point--;
                }
                uint orig_eidx = b->end_idx();
                maxlrg = split_DEF( n1, b, insert_point, maxlrg, Reachblock, debug_defs, splits, slidx);
                // If it wasn't split bail
                if (!maxlrg) {
                  return 0;
                }
                // Spill of NULL check mem op goes into the following block.
                if (b->end_idx() > orig_eidx) {
                  insidx++;
                }
              }
              // This is a new DEF, so update UP
              UPblock[slidx] = false;
#ifndef PRODUCT
              // DEBUG
              if( trace_spilling() ) {
                tty->print("\nNew Split DOWN DEF of Spill Idx ");
                tty->print("%d, UP %d:\n",slidx,false);
                n1->dump();
              }
#endif
            }
          }  // end if LRG is UP
        }  // end for all spilling live ranges
        assert( b->_nodes[insidx] == n, "got insidx set incorrectly" );
      }  // end if crossing HRP Boundary

      // If the LRG index is oob, then this is a new spillcopy, skip it.
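      // (SpillCopies created earlier in this pass were numbered at or
      // above the pass-entry maximum, so the index alone identifies them.)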
      if (defidx >= _lrg_map.max_lrg_id()) {
        continue;
      }
      LRG &deflrg = lrgs(defidx);
      uint copyidx = n->is_Copy();
      // Remove coalesced copy from CFG
      if (copyidx && defidx == _lrg_map.live_range_id(n->in(copyidx))) {
        n->replace_by( n->in(copyidx) );
        n->set_req( copyidx, NULL );
        b->_nodes.remove(insidx--);
        b->_ihrp_index--;       // Adjust the point where we go hi-pressure
        b->_fhrp_index--;
        continue;
      }

#define DERIVED 0

      // ********** Handle USES **********
      bool nullcheck = false;
      // Implicit null checks never use the spilled value
      if( n->is_MachNullCheck() )
        nullcheck = true;
      if( !nullcheck ) {
        // Search all inputs for a Spill-USE
        JVMState* jvms = n->jvms();
        uint oopoff = jvms ? jvms->oopoff() : cnt;
        uint old_last = cnt - 1;
        for( inpidx = 1; inpidx < cnt; inpidx++ ) {
          // Derived/base pairs may be added to our inputs during this loop.
          // If inpidx > old_last, then one of these new inputs is being
          // handled.  Skip the derived part of the pair, but process
          // the base like any other input.
          if (inpidx > old_last && ((inpidx - oopoff) & 1) == DERIVED) {
            continue;  // skip derived_debug added below
          }
          // Get lidx of input
          uint useidx = _lrg_map.find_id(n->in(inpidx));
          // Not a brand-new split, and it is a spill use
          if (useidx < _lrg_map.max_lrg_id() && lrgs(useidx).reg() >= LRG::SPILL_REG) {
            // Check for valid reaching DEF
            slidx = lrg2reach[useidx];
            Node *def = Reachblock[slidx];
            assert( def != NULL, "Using Undefined Value in Split()\n");

            // (+++) %%%% remove this in favor of pre-pass in matcher.cpp
            // monitor references do not care where they live, so just hook
            if ( jvms && jvms->is_monitor_use(inpidx) ) {
              // The effect of this clone is to drop the node out of the block,
              // so that the allocator does not see it anymore, and therefore
              // does not attempt to assign it a register.
              def = clone_node(def, b, C);
              if (def == NULL || C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {
                return 0;
              }
              _lrg_map.extend(def->_idx, 0);
              _cfg._bbs.map(def->_idx,b);
              n->set_req(inpidx, def);
              continue;
            }

            // Rematerializable?  Then clone def at use site instead
            // of store/load
            if( def->rematerialize() ) {
              int old_size = b->_nodes.size();
              def = split_Rematerialize( def, b, insidx, maxlrg, splits, slidx, lrg2reach, Reachblock, true );
              if( !def ) return 0; // Bail out
              insidx += b->_nodes.size()-old_size;
            }

            MachNode *mach = n->is_Mach() ? n->as_Mach() : NULL;
            // Base pointers and oopmap references do not care where they live.
            if ((inpidx >= oopoff) ||
                (mach && mach->ideal_Opcode() == Op_AddP && inpidx == AddPNode::Base)) {
              if (def->rematerialize() && lrgs(useidx)._was_spilled2) {
                // This def has been rematerialized a couple of times without
                // progress.  It doesn't care if it lives UP or DOWN, so
                // spill it down now.
                maxlrg = split_USE(def,b,n,inpidx,maxlrg,false,false,splits,slidx);
                // If it wasn't split bail
                if (!maxlrg) {
                  return 0;
                }
                insidx++;  // Reset iterator to skip USE side split
              } else {
                // Just hook the def edge
                n->set_req(inpidx, def);
              }

              if (inpidx >= oopoff) {
                // After oopoff, we have derived/base pairs.  We must mention all
                // derived pointers here as derived/base pairs for GC.  If the
                // derived value is spilling and we have a copy both in Reachblock
                // (called here 'def') and debug_defs[slidx] we need to mention
                // both in derived/base pairs or kill one.
                Node *derived_debug = debug_defs[slidx];
                if( ((inpidx - oopoff) & 1) == DERIVED && // derived vs base?
                    mach && mach->ideal_Opcode() != Op_Halt &&
                    derived_debug != NULL &&
                    derived_debug != def ) { // Actual 2nd value appears
                  // We have already set 'def' as a derived value.
                  // Also set debug_defs[slidx] as a derived value.
                  uint k;
                  for( k = oopoff; k < cnt; k += 2 )
                    if( n->in(k) == derived_debug )
                      break;      // Found an instance of debug derived
                  if( k == cnt ) {// No instance of debug_defs[slidx]
                    // Add a derived/base pair to cover the debug info.
                    // We have to process the added base later since it is not
                    // handled yet at this point but skip derived part.
                    assert(((n->req() - oopoff) & 1) == DERIVED,
                           "must match skip condition above");
                    n->add_req( derived_debug );   // this will be skipped above
                    n->add_req( n->in(inpidx+1) ); // this will be processed
                    // Increment cnt to handle added input edges on
                    // subsequent iterations.
                    cnt += 2;
                  }
                }
              }
              continue;
            }
            // Special logic for DEBUG info
            if( jvms && b->_freq > BLOCK_FREQUENCY(0.5) ) {
              uint debug_start = jvms->debug_start();
              // If this is debug info use & there is a reaching DOWN def
              if ((debug_start <= inpidx) && (debug_defs[slidx] != NULL)) {
                assert(inpidx < oopoff, "handle only debug info here");
                // Just hook it in & move on
                n->set_req(inpidx, debug_defs[slidx]);
                // (Note that this can make two sides of a split live at the
                // same time: The debug def on stack, and another def in a
                // register.  The GC needs to know about both of them, but any
                // derived pointers after oopoff will refer to only one of the
                // two defs and the GC would therefore miss the other.  Thus
                // this hack is only allowed for debug info which is Java state
                // and therefore never a derived pointer.)
                continue;
              }
            }
            // Grab register mask info
            const RegMask &dmask = def->out_RegMask();
            const RegMask &umask = n->in_RegMask(inpidx);
            bool is_vect = RegMask::is_vector(def->ideal_reg());
            assert(inpidx < oopoff, "cannot use-split oop map info");

            bool dup = UPblock[slidx];
            bool uup = umask.is_UP();

            // Need special logic to handle bound USES.  Insert a split at this
            // bound use if we can't rematerialize the def, or if we need the
            // split to form a misaligned pair.
            if( !umask.is_AllStack() &&
                (int)umask.Size() <= lrgs(useidx).num_regs() &&
                (!def->rematerialize() ||
                 !is_vect && umask.is_misaligned_pair())) {
              // These need a Split regardless of overlap or pressure
              // SPLIT - NO DEF - NO CISC SPILL
              maxlrg = split_USE(def,b,n,inpidx,maxlrg,dup,false, splits,slidx);
              // If it wasn't split bail
              if (!maxlrg) {
                return 0;
              }
              insidx++;  // Reset iterator to skip USE side split
              continue;
            }

            if (UseFPUForSpilling && n->is_MachCall() && !uup && !dup ) {
              // The use at the call can force the def down so insert
              // a split before the use to allow the def more freedom.
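              // (With UseFPUForSpilling the value may be parked in an
              // FPU/SSE register across the call instead of in memory,
              // so keeping the def's placement options open is worthwhile.)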
              maxlrg = split_USE(def,b,n,inpidx,maxlrg,dup,false, splits,slidx);
              // If it wasn't split bail
              if (!maxlrg) {
                return 0;
              }
              insidx++;  // Reset iterator to skip USE side split
              continue;
            }

            // Here is the logic chart which describes USE Splitting:
            // 0 = false or DOWN, 1 = true or UP
            //
            // Overlap | DEF | USE | Action
            //-------------------------------------------------------
            //    0    |  0  |  0  | Copy - mem -> mem
            //    0    |  0  |  1  | Split-UP - Check HRP
            //    0    |  1  |  0  | Split-DOWN - Debug Info?
            //    0    |  1  |  1  | Copy - reg -> reg
            //    1    |  0  |  0  | Reset Input Edge (no Split)
            //    1    |  0  |  1  | Split-UP - Check HRP
            //    1    |  1  |  0  | Split-DOWN - Debug Info?
            //    1    |  1  |  1  | Reset Input Edge (no Split)
            //
            // So, if (dup == uup), then overlap test determines action,
            // with true being no split, and false being copy.  Else,
            // if DEF is DOWN, Split-UP, and check HRP to decide on
            // resetting DEF.  Finally if DEF is UP, Split-DOWN, with
            // special handling for Debug Info.
            if( dup == uup ) {
              if( dmask.overlap(umask) ) {
                // Both are either up or down, and there is overlap, No Split
                n->set_req(inpidx, def);
              }
              else {  // Both are either up or down, and there is no overlap
                if( dup ) {  // If UP, reg->reg copy
                  // COPY ACROSS HERE - NO DEF - NO CISC SPILL
                  maxlrg = split_USE(def,b,n,inpidx,maxlrg,false,false, splits,slidx);
                  // If it wasn't split bail
                  if (!maxlrg) {
                    return 0;
                  }
                  insidx++;  // Reset iterator to skip USE side split
                }
                else {       // DOWN, mem->mem copy
                  // COPY UP & DOWN HERE - NO DEF - NO CISC SPILL
                  // First Split-UP to move value into Register
                  uint def_ideal = def->ideal_reg();
                  const RegMask* tmp_rm = Matcher::idealreg2regmask[def_ideal];
                  Node *spill = new (C) MachSpillCopyNode(def, dmask, *tmp_rm);
                  insert_proj( b, insidx, spill, maxlrg );
                  // Then Split-DOWN as if previous Split was DEF
                  maxlrg = split_USE(spill,b,n,inpidx,maxlrg,false,false, splits,slidx);
                  // If it wasn't split bail
                  if (!maxlrg) {
                    return 0;
                  }
                  insidx += 2;  // Reset iterator to skip USE side splits
                }
              }  // End else no overlap
            }  // End if dup == uup
            // dup != uup, so check dup for direction of Split
            else {
              if( dup ) {  // If UP, Split-DOWN and check Debug Info
                // If this node is already a SpillCopy, just patch the edge
                // except the case of spilling to stack.
                if( n->is_SpillCopy() ) {
                  RegMask tmp_rm(umask);
                  tmp_rm.SUBTRACT(Matcher::STACK_ONLY_mask);
                  if( dmask.overlap(tmp_rm) ) {
                    if( def != n->in(inpidx) ) {
                      n->set_req(inpidx, def);
                    }
                    continue;
                  }
                }
                // COPY DOWN HERE - NO DEF - NO CISC SPILL
                maxlrg = split_USE(def,b,n,inpidx,maxlrg,false,false, splits,slidx);
                // If it wasn't split bail
                if (!maxlrg) {
                  return 0;
                }
                insidx++;  // Reset iterator to skip USE side split
                // Check for debug-info split.  Capture it for later
                // debug splits of the same value
                if (jvms && jvms->debug_start() <= inpidx && inpidx < oopoff)
                  debug_defs[slidx] = n->in(inpidx);

              }
              else {       // DOWN, Split-UP and check register pressure
                if( is_high_pressure( b, &lrgs(useidx), insidx ) ) {
                  // COPY UP HERE - NO DEF - CISC SPILL
                  maxlrg = split_USE(def,b,n,inpidx,maxlrg,true,true, splits,slidx);
                  // If it wasn't split bail
                  if (!maxlrg) {
                    return 0;
                  }
                  insidx++;  // Reset iterator to skip USE side split
                } else {     // LRP
                  // COPY UP HERE - WITH DEF - NO CISC SPILL
                  maxlrg = split_USE(def,b,n,inpidx,maxlrg,true,false, splits,slidx);
                  // If it wasn't split bail
                  if (!maxlrg) {
                    return 0;
                  }
                  // Flag this lift-up in a low-pressure block as
                  // already-spilled, so if it spills again it will
                  // spill hard (instead of not spilling hard and
                  // coalescing away).
                  set_was_spilled(n->in(inpidx));
                  // Since this is a new DEF, update Reachblock & UP
                  Reachblock[slidx] = n->in(inpidx);
                  UPblock[slidx] = true;
                  insidx++;  // Reset iterator to skip USE side split
                }
              }  // End else DOWN
            }  // End dup != uup
          }  // End if Spill USE
        }  // End For All Inputs
      }  // End If not nullcheck

      // ********** Handle DEFS **********
      // DEFS either Split DOWN in HRP regions or when the LRG is bound, or
      // just reset the Reaches info in LRP regions.  DEFS must always update
      // UP info.
      if( deflrg.reg() >= LRG::SPILL_REG ) {    // Spilled?
        uint slidx = lrg2reach[defidx];
        // Add to defs list for later assignment of new live range number
        defs->push(n);
        // Set a flag on the Node indicating it has already spilled.
        // Only do it for capacity spills not conflict spills.
        if( !deflrg._direct_conflict )
          set_was_spilled(n);
        assert(!n->is_Phi(),"Cannot insert Phi into DEFS list");
        // Grab UP info for DEF
        const RegMask &dmask = n->out_RegMask();
        bool defup = dmask.is_UP();
        int ireg = n->ideal_reg();
        bool is_vect = RegMask::is_vector(ireg);
        // Only split at Def if this is a HRP block or bound (and spilled once)
        if( !n->rematerialize() &&
            (((dmask.is_bound(ireg) || !is_vect && dmask.is_misaligned_pair()) &&
              (deflrg._direct_conflict || deflrg._must_spill)) ||
             // Check for LRG being up in a register and we are inside a high
             // pressure area.  Spill it down immediately.
             (defup && is_high_pressure(b,&deflrg,insidx))) ) {
          assert( !n->rematerialize(), "" );
          assert( !n->is_SpillCopy(), "" );
          // Do a split at the def site.
          maxlrg = split_DEF( n, b, insidx, maxlrg, Reachblock, debug_defs, splits, slidx );
          // If it wasn't split bail
          if (!maxlrg) {
            return 0;
          }
          // Split DEF's Down
          UPblock[slidx] = 0;
#ifndef PRODUCT
          // DEBUG
          if( trace_spilling() ) {
            tty->print("\nNew Split DOWN DEF of Spill Idx ");
            tty->print("%d, UP %d:\n",slidx,false);
            n->dump();
          }
#endif
        }
        else {                  // Neither bound nor HRP, must be LRP
          // otherwise, just record the def
          Reachblock[slidx] = n;
          // UP should come from the outRegmask() of the DEF
          UPblock[slidx] = defup;
          // Update debug list of reaching down definitions, kill if DEF is UP
          debug_defs[slidx] = defup ? NULL : n;
#ifndef PRODUCT
          // DEBUG
          if( trace_spilling() ) {
            tty->print("\nNew DEF of Spill Idx ");
            tty->print("%d, UP %d:\n",slidx,defup);
            n->dump();
          }
#endif
        }  // End else LRP
      }  // End if spill def

      // ********** Split Left Over Mem-Mem Moves **********
      // Check for mem-mem copies and split them now.  Do not do this
      // to copies about to be spilled; they will be Split shortly.
      if (copyidx) {
        Node *use = n->in(copyidx);
        uint useidx = _lrg_map.find_id(use);
        if (useidx < _lrg_map.max_lrg_id() &&  // This is not a new split
            OptoReg::is_stack(deflrg.reg()) &&
            deflrg.reg() < LRG::SPILL_REG ) {  // And DEF is from stack
          LRG &uselrg = lrgs(useidx);
          if( OptoReg::is_stack(uselrg.reg()) &&
              uselrg.reg() < LRG::SPILL_REG && // USE is from stack
              deflrg.reg() != uselrg.reg() ) { // Not trivially removed
            uint def_ideal_reg = n->bottom_type()->ideal_reg();
            const RegMask &def_rm = *Matcher::idealreg2regmask[def_ideal_reg];
            const RegMask &use_rm = n->in_RegMask(copyidx);
            if( def_rm.overlap(use_rm) && n->is_SpillCopy() ) {  // Bug 4707800, 'n' may be a storeSSL
              if (C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {  // Check when generating nodes
                return 0;
              }
              Node *spill = new (C) MachSpillCopyNode(use,use_rm,def_rm);
              n->set_req(copyidx,spill);
              n->as_MachSpillCopy()->set_in_RegMask(def_rm);
              // Put the spill just before the copy
              insert_proj( b, insidx++, spill, maxlrg++ );
            }
          }
        }
      }
    }  // End For All Instructions in Block - Non-PHI Pass

    // Check if each LRG is live out of this block so as not to propagate
    // beyond the last use of a LRG.
    for( slidx = 0; slidx < spill_cnt; slidx++ ) {
      uint defidx = lidxs.at(slidx);
      IndexSet *liveout = _live->live(b);
      if( !liveout->member(defidx) ) {
#ifdef ASSERT
        // The index defidx is not live.  Check the liveout array to ensure that
        // it contains no members which compress to defidx.  Finding such an
        // instance may be a case to add liveout adjustment in compress_uf_map().
        // See 5063219.
        uint member;
        IndexSetIterator isi(liveout);
        while ((member = isi.next()) != 0) {
          assert(defidx != _lrg_map.find_const(member), "Live out member has not been compressed");
        }
#endif
        Reachblock[slidx] = NULL;
      } else {
        assert(Reachblock[slidx] != NULL,"No reaching definition for liveout value");
      }
    }
#ifndef PRODUCT
    if( trace_spilling() )
      b->dump();
#endif
  }  // End For All Blocks

  //----------PASS 2----------
  // Reset all DEF live range numbers here
  for( insidx = 0; insidx < defs->size(); insidx++ ) {
    // Grab the def
    n1 = defs->at(insidx);
    // Set new lidx for DEF
    new_lrg(n1, maxlrg++);
  }
  //----------Phi Node Splitting----------
  // Clean up a phi here, and assign a new live range number
  // Cycle through this block's predecessors, collecting Reaches
  // info for each spilled LRG and update edges.
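  // (PASS 2 gives every recorded DEF and Phi a fresh live range number,
  // then patches each Phi input to the reaching def computed in PASS 1,
  // inserting USE-side splits where the UP/DOWN sense disagrees.)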
  // Walk the phis list to patch inputs, split phis, and name phis
  uint lrgs_before_phi_split = maxlrg;
  for( insidx = 0; insidx < phis->size(); insidx++ ) {
    Node *phi = phis->at(insidx);
    assert(phi->is_Phi(),"This list must only contain Phi Nodes");
    Block *b = _cfg._bbs[phi->_idx];
    // Grab the live range number
    uint lidx = _lrg_map.find_id(phi);
    uint slidx = lrg2reach[lidx];
    // Update node to lidx map
    new_lrg(phi, maxlrg++);
    // Get PASS1's up/down decision for the block.
    int phi_up = !!UP_entry[slidx]->test(b->_pre_order);

    // Force down if double-spilling live range
    if( lrgs(lidx)._was_spilled1 )
      phi_up = false;

    // When splitting a Phi we can split it normally or "inverted".
    // An inverted split makes the splits target the Phi's UP/DOWN
    // sense inverted; then the Phi is followed by a final def-side
    // split to invert back.  It changes which blocks the spill code
    // goes in.

    // Walk the predecessor blocks and assign the reaching def to the Phi.
    // Split Phi nodes by placing USE side splits wherever the reaching
    // DEF has the wrong UP/DOWN value.
    for( uint i = 1; i < b->num_preds(); i++ ) {
      // Get predecessor block pre-order number
      Block *pred = _cfg._bbs[b->pred(i)->_idx];
      pidx = pred->_pre_order;
      // Grab reaching def
      Node *def = Reaches[pidx][slidx];
      assert( def, "must have reaching def" );
      // If input up/down sense and reg-pressure DISagree
      if (def->rematerialize() && contains_no_live_range_input(def)) {
        // Place the rematerialized node above any MSCs created during
        // phi node splitting.  end_idx points at the insertion point
        // so look at the node before it.
        int insert = pred->end_idx();
        while (insert >= 1 &&
               pred->_nodes[insert - 1]->is_SpillCopy() &&
               _lrg_map.find(pred->_nodes[insert - 1]) >= lrgs_before_phi_split) {
          insert--;
        }
        def = split_Rematerialize(def, pred, insert, maxlrg, splits, slidx, lrg2reach, Reachblock, false);
        if (!def) {
          return 0;             // Bail out
        }
      }
      // Update the Phi's input edge array
      phi->set_req(i,def);
      // Grab the UP/DOWN sense for the input
      u1 = UP[pidx][slidx];
      if( u1 != (phi_up != 0)) {
        maxlrg = split_USE(def, b, phi, i, maxlrg, !u1, false, splits,slidx);
        // If it wasn't split bail
        if (!maxlrg) {
          return 0;
        }
      }
    }  // End for all inputs to the Phi
  }  // End for all Phi Nodes
  // Update _maxlrg to save Union asserts
  _lrg_map.set_max_lrg_id(maxlrg);


  //----------PASS 3----------
  // Pass over all Phi's to union the live ranges
  for( insidx = 0; insidx < phis->size(); insidx++ ) {
    Node *phi = phis->at(insidx);
    assert(phi->is_Phi(),"This list must only contain Phi Nodes");
    // Walk all inputs to Phi and Union input live range with Phi live range
    for( uint i = 1; i < phi->req(); i++ ) {
      // Grab the input node
      Node *n = phi->in(i);
      assert(n, "node should exist");
      uint lidx = _lrg_map.find(n);
      uint pidx = _lrg_map.find(phi);
      if (lidx < pidx) {
        Union(n, phi);
      }
      else if(lidx > pidx) {
        Union(phi, n);
      }
    }  // End for all inputs to the Phi Node
  }  // End for all Phi Nodes
  // Now union all two address instructions
  for (insidx = 0; insidx < defs->size(); insidx++) {
    // Grab the def
    n1 = defs->at(insidx);
    // Set new lidx for DEF & handle 2-addr instructions
    if (n1->is_Mach() && ((twoidx = n1->as_Mach()->two_adr()) != 0)) {
      assert(_lrg_map.find(n1->in(twoidx)) < maxlrg,"Assigning bad live range index");
      // Union the input and output live ranges
      uint lr1 = _lrg_map.find(n1);
      uint lr2 = _lrg_map.find(n1->in(twoidx));
      if (lr1 < lr2) {
        Union(n1, n1->in(twoidx));
      }
      else if (lr1 > lr2) {
        Union(n1->in(twoidx), n1);
      }
    }  // End if two address
  }  // End for all defs
  // DEBUG
#ifdef ASSERT
  // Validate all live range index assignments
  for (bidx = 0; bidx < _cfg._num_blocks; bidx++) {
    b = _cfg._blocks[bidx];
    for (insidx = 0; insidx <= b->end_idx(); insidx++) {
      Node *n = b->_nodes[insidx];
      uint defidx = _lrg_map.find(n);
      assert(defidx < _lrg_map.max_lrg_id(), "Bad live range index in Split");
      assert(defidx < maxlrg,"Bad live range index in Split");
    }
  }
  // Issue a warning if splitting made no progress
  int noprogress = 0;
  for (slidx = 0; slidx < spill_cnt; slidx++) {
    if (PrintOpto && WizardMode && splits.at(slidx) == 0) {
      tty->print_cr("Failed to split live range %d", lidxs.at(slidx));
      //BREAKPOINT;
    }
    else {
      noprogress++;
    }
  }
  if(!noprogress) {
    tty->print_cr("Failed to make progress in Split");
    //BREAKPOINT;
  }
#endif
  // Return updated count of live ranges
  return maxlrg;
}