/*
 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/chaitin.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"

//------------------------------Split--------------------------------------
// Walk the graph in RPO and for each lrg which spills, propagate reaching
// definitions. During propagation, split the live range around regions of
// High Register Pressure (HRP). If a Def is in a region of Low Register
// Pressure (LRP), it will not get spilled until we encounter a region of
// HRP between it and one of its uses. We will spill at the transition
// point between LRP and HRP. Uses in the HRP region will use the spilled
// Def. The first Use outside the HRP region will generate a SpillCopy to
// hoist the live range back up into a register, and all subsequent uses
// will use that new Def until another HRP region is encountered. Defs in
// HRP regions will get trailing SpillCopies to push the LRG down into the
// stack immediately.
//
// As a side effect, unlink from (hence make dead) coalesced copies.
//
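// Illustrative sketch of the heuristic above (not from the original
// sources; primed names are hypothetical, the real code names new live
// ranges by number as they are created):
//
//   def  L            // stays in a register (LRP region)
//   ...
//   SpillCopy L->L'   // split DOWN at the LRP->HRP transition; L' on stack
//   ... uses of L'    // uses inside the HRP region read the stack copy
//   SpillCopy L'->L'' // first use past the HRP region hoists back UP
//   use  L''          // subsequent LRP uses run from the register copy
//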
static const char out_of_nodes[] = "out of nodes during split";

static bool contains_no_live_range_input(const Node* def) {
  for (uint i = 1; i < def->req(); ++i) {
    if (def->in(i) != NULL && def->in_RegMask(i).is_NotEmpty()) {
      return false;
    }
  }
  return true;
}

//------------------------------get_spillcopy_wide-----------------------------
// Get a SpillCopy node with wide-enough masks. Use the 'wide-mask', the
// wide ideal-register spill-mask if possible. If the 'wide-mask' does
// not cover the input (or output), use the input (or output) mask instead.
Node *PhaseChaitin::get_spillcopy_wide( Node *def, Node *use, uint uidx ) {
  // If ideal reg doesn't exist we've got a bad schedule happening
  // that is forcing us to spill something that isn't spillable.
  // Bail rather than abort.
  int ireg = def->ideal_reg();
  if( ireg == 0 || ireg == Op_RegFlags ) {
    assert(false, "attempted to spill a non-spillable item");
    C->record_method_not_compilable("attempted to spill a non-spillable item");
    return NULL;
  }
  if (C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {
    return NULL;
  }
  const RegMask *i_mask = &def->out_RegMask();
  const RegMask *w_mask = C->matcher()->idealreg2spillmask[ireg];
  const RegMask *o_mask = use ? &use->in_RegMask(uidx) : w_mask;
  const RegMask *w_i_mask = w_mask->overlap( *i_mask ) ? w_mask : i_mask;
  const RegMask *w_o_mask;

  int num_regs = RegMask::num_registers(ireg);
  bool is_vect = RegMask::is_vector(ireg);
  if( w_mask->overlap( *o_mask ) &&        // Overlap AND
      ((num_regs == 1)                     // Single use or aligned
       || is_vect                          // or vector
       || (!is_vect && o_mask->is_aligned_pairs())) ) {
    assert(!is_vect || o_mask->is_aligned_sets(num_regs), "vectors are aligned");
    // Don't come here for mis-aligned doubles
    w_o_mask = w_mask;
  } else {                      // wide ideal mask does not overlap with o_mask
    // Mis-aligned doubles come here and XMM->FPR moves on x86.
    w_o_mask = o_mask;          // Must target desired registers
    // Does the ideal-reg-mask overlap with o_mask? I.e., can I use
    // a reg-reg move or do I need a trip across register classes
    // (and thus through memory)?
    if( !C->matcher()->idealreg2regmask[ireg]->overlap( *o_mask) && o_mask->is_UP() )
      // Here we assume a trip through memory is required.
      w_i_mask = &C->FIRST_STACK_mask();
  }
  return new (C) MachSpillCopyNode( def, *w_i_mask, *w_o_mask );
}
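// Concrete cases for the else-branch above, echoing the comments in the
// code (x86-flavored and illustrative only): a double whose use demands a
// misaligned stack pair fails the aligned-pairs test, so the copy must
// target o_mask directly; and for an XMM->FPR move the ideal reg-reg mask
// shares no register with o_mask while o_mask is UP, so w_i_mask falls
// back to FIRST_STACK_mask() and the value takes a trip through memory.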
//------------------------------insert_proj------------------------------------
// Insert the spill at chosen location. Skip over any intervening Proj's or
// Phis. Skip over a CatchNode and projs, inserting in the fall-through block
// instead. Update high-pressure indices. Create a new live range.
void PhaseChaitin::insert_proj( Block *b, uint i, Node *spill, uint maxlrg ) {
  // Skip intervening ProjNodes. Do not insert between a ProjNode and
  // its definer.
  while( i < b->_nodes.size() &&
         (b->_nodes[i]->is_Proj() ||
          b->_nodes[i]->is_Phi() ) )
    i++;

  // Do not insert between a call and its Catch
  if( b->_nodes[i]->is_Catch() ) {
    // Put the instruction at the top of the fall-thru block.
    // Find the fall-thru projection
    while( 1 ) {
      const CatchProjNode *cp = b->_nodes[++i]->as_CatchProj();
      if( cp->_con == CatchProjNode::fall_through_index )
        break;
    }
    int sidx = i - b->end_idx()-1;
    b = b->_succs[sidx];        // Switch to successor block
    i = 1;                      // Right at start of block
  }

  b->_nodes.insert(i,spill);    // Insert node in block
  _cfg._bbs.map(spill->_idx,b); // Update node->block mapping to reflect
  // Adjust the point where we go hi-pressure
  if( i <= b->_ihrp_index ) b->_ihrp_index++;
  if( i <= b->_fhrp_index ) b->_fhrp_index++;

  // Assign a new Live Range Number to the SpillCopy and grow
  // the node->live range mapping.
  new_lrg(spill,maxlrg);
}

//------------------------------split_DEF--------------------------------------
// There are four categories of Split; UP/DOWN x DEF/USE.
// Only three of these really occur as DOWN/USE will always color.
// Any Split with a DEF cannot CISC-Spill now. Thus we need
// two helper routines, one for Split DEFS (insert after instruction),
// one for Split USES (insert before instruction). DEF insertion
// happens inside Split, where the Leaveblock array is updated.
uint PhaseChaitin::split_DEF( Node *def, Block *b, int loc, uint maxlrg, Node **Reachblock, Node **debug_defs, GrowableArray<uint> splits, int slidx ) {
#ifdef ASSERT
  // Increment the counter for this lrg
  splits.at_put(slidx, splits.at(slidx)+1);
#endif
  // If we are spilling the memory op for an implicit null check, at the
  // null check location (ie - null check is in HRP block) we need to do
  // the null-check first, then spill-down in the following block.
  // (The implicit_null_check function ensures the use is also dominated
  // by the branch-not-taken block.)
  Node *be = b->end();
  if( be->is_MachNullCheck() && be->in(1) == def && def == b->_nodes[loc] ) {
    // Spill goes in the branch-not-taken block
    b = b->_succs[b->_nodes[b->end_idx()+1]->Opcode() == Op_IfTrue];
    loc = 0;                    // Just past the Region
  }
  assert( loc >= 0, "must insert past block head" );

  // Get a def-side SpillCopy
  Node *spill = get_spillcopy_wide(def,NULL,0);
  // Did we fail to split? Then bail.
  if (!spill) {
    return 0;
  }

  // Insert the spill at chosen location
  insert_proj( b, loc+1, spill, maxlrg++);

  // Insert new node into Reaches array
  Reachblock[slidx] = spill;
  // Update debug list of reaching down definitions by adding this one
  debug_defs[slidx] = spill;

  // return updated count of live ranges
  return maxlrg;
}
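// Usage sketch (mirrors the real call sites later in this file): a def-side
// split of node n in block b at instruction index insidx looks like
//
//   maxlrg = split_DEF(n, b, insidx, maxlrg, Reachblock, debug_defs,
//                      splits, slidx);
//   if (!maxlrg) return 0;   // zero return means the helper bailed out
//
// i.e. the helpers thread the updated live range count through, and a zero
// return is the out-of-nodes / unsplittable bailout signal.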
//------------------------------split_USE--------------------------------------
// Splits at uses can involve redefining the LRG, so no CISC Spilling there.
// Debug uses want to know if def is already stack enabled.
uint PhaseChaitin::split_USE( Node *def, Block *b, Node *use, uint useidx, uint maxlrg, bool def_down, bool cisc_sp, GrowableArray<uint> splits, int slidx ) {
#ifdef ASSERT
  // Increment the counter for this lrg
  splits.at_put(slidx, splits.at(slidx)+1);
#endif

  // Some setup stuff for handling debug node uses
  JVMState* jvms = use->jvms();
  uint debug_start = jvms ? jvms->debug_start() : 999999;
  uint debug_end   = jvms ? jvms->debug_end()   : 999999;

  //-------------------------------------------
  // Check for use of debug info
  if (useidx >= debug_start && useidx < debug_end) {
    // Actually it's perfectly legal for constant debug info to appear,
    // just unlikely. In this case the optimizer left a ConI of a 4
    // as both inputs to a Phi with only a debug use. It's a single-def
    // live range of a rematerializable value. The live range spills,
    // rematerializes and now the ConI directly feeds into the debug info.
    // assert(!def->is_Con(), "constant debug info already constructed directly");

    // Special split handling for Debug Info:
    // If DEF is DOWN, just hook the edge and return.
    // If DEF is UP, Split it DOWN for this USE.
    if( def->is_Mach() ) {
      if( def_down ) {
        // DEF is DOWN, so connect USE directly to the DEF
        use->set_req(useidx, def);
      } else {
        // Block and index where the use occurs.
        Block *b = _cfg._bbs[use->_idx];
        // Put the clone just prior to use
        int bindex = b->find_node(use);
        // DEF is UP, so must copy it DOWN and hook in USE.
        // Insert SpillCopy before the USE, which uses DEF as its input,
        // and defs a new live range, which is used by this node.
        Node *spill = get_spillcopy_wide(def,use,useidx);
        // Did we fail to split?
        if (!spill) {
          // Bail
          return 0;
        }
        // insert into basic block
        insert_proj( b, bindex, spill, maxlrg++ );
        // Use the new split
        use->set_req(useidx,spill);
      }
      // No further split handling needed for this use
      return maxlrg;
    }  // End special splitting for debug info live range
  }  // If debug info

  // CISC-SPILLING
  // Finally, check to see if USE is CISC-Spillable, and if so,
  // gather_lrg_masks will add the flags bit to its mask, and
  // no use side copy is needed. This frees up the live range
  // register choices without causing copy coalescing, etc.
  if( UseCISCSpill && cisc_sp ) {
    int inp = use->cisc_operand();
    if( inp != AdlcVMDeps::Not_cisc_spillable )
      // Convert operand number to edge index number
      inp = use->as_Mach()->operand_index(inp);
    if( inp == (int)useidx ) {
      use->set_req(useidx, def);
#ifndef PRODUCT
      if( TraceCISCSpill ) {
        tty->print("  set_split: ");
        use->dump();
      }
#endif
      return maxlrg;
    }
  }

  //-------------------------------------------
  // Insert a Copy before the use

  // Block and index where the use occurs.
  int bindex;
  // Phi input spill-copies belong at the end of the prior block
  if( use->is_Phi() ) {
    b = _cfg._bbs[b->pred(useidx)->_idx];
    bindex = b->end_idx();
  } else {
    // Put the clone just prior to use
    bindex = b->find_node(use);
  }

  Node *spill = get_spillcopy_wide( def, use, useidx );
  if( !spill ) return 0;        // Bailed out
  // Insert SpillCopy before the USE, which uses the reaching DEF as
  // its input, and defs a new live range, which is used by this node.
  insert_proj( b, bindex, spill, maxlrg++ );
  // Use the spill/clone
  use->set_req(useidx,spill);

  // return updated live range count
  return maxlrg;
}

//------------------------------clone_node----------------------------
// Clone node with anti dependence check.
Node* clone_node(Node* def, Block *b, Compile* C) {
  if (def->needs_anti_dependence_check()) {
#ifdef ASSERT
    if (Verbose) {
      tty->print_cr("RA attempts to clone node with anti_dependence:");
      def->dump(-1); tty->cr();
      tty->print_cr("into block:");
      b->dump();
    }
#endif
    if (C->subsume_loads() && !C->failing()) {
      // Retry with subsume_loads == false.
      // If this is the first failure, the sentinel string will "stick"
      // to the Compile object, and the C2Compiler will see it and retry.
      C->record_failure(C2Compiler::retry_no_subsuming_loads());
    } else {
      // Bailout without retry
      C->record_method_not_compilable("RA Split failed: attempt to clone node with anti_dependence");
    }
    return 0;
  }
  return def->clone();
}
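// Background note on the bailout above (this comment's reading, not
// asserted by the code): nodes needing an anti-dependence check read
// memory, and cloning such a node into another block could move the read
// past a conflicting store, changing the value observed. Rather than risk
// that, the allocator restarts the compile without load subsumption or,
// failing that, gives up on the method.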

//------------------------------split_Rematerialize----------------------------
// Clone a local copy of the def.
Node *PhaseChaitin::split_Rematerialize( Node *def, Block *b, uint insidx, uint &maxlrg, GrowableArray<uint> splits, int slidx, uint *lrg2reach, Node **Reachblock, bool walkThru ) {
  // The input live ranges will be stretched to the site of the new
  // instruction. They might be stretched past a def and will thus
  // have the old and new values of the same live range alive at the
  // same time - a definite no-no. Split out private copies of
  // the inputs.
  if( def->req() > 1 ) {
    for( uint i = 1; i < def->req(); i++ ) {
      Node *in = def->in(i);
      // Check for single-def (LRG cannot be redefined)
      uint lidx = _lrg_map.live_range_id(in);
      if (lidx >= _lrg_map.max_lrg_id()) {
        continue;  // Value is a recent spill-copy
      }
      if (lrgs(lidx).is_singledef()) {
        continue;
      }

      Block *b_def = _cfg._bbs[def->_idx];
      int idx_def = b_def->find_node(def);
      Node *in_spill = get_spillcopy_wide( in, def, i );
      if( !in_spill ) return 0; // Bailed out
      insert_proj(b_def,idx_def,in_spill,maxlrg++);
      if( b_def == b )
        insidx++;
      def->set_req(i,in_spill);
    }
  }

  Node *spill = clone_node(def, b, C);
  if (spill == NULL || C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {
    // Check when generating nodes
    return 0;
  }

  // See if any inputs are currently being spilled, and take the
  // latest copy of spilled inputs.
  if( spill->req() > 1 ) {
    for( uint i = 1; i < spill->req(); i++ ) {
      Node *in = spill->in(i);
      uint lidx = _lrg_map.find_id(in);

      // Walk backwards thru spill copy node intermediates
      if (walkThru) {
        while (in->is_SpillCopy() && lidx >= _lrg_map.max_lrg_id()) {
          in = in->in(1);
          lidx = _lrg_map.find_id(in);
        }

        if (lidx < _lrg_map.max_lrg_id() && lrgs(lidx).is_multidef()) {
          // walkThru found a multidef LRG, which is unsafe to use, so
          // just keep the original def used in the clone.
          in = spill->in(i);
          lidx = _lrg_map.find_id(in);
        }
      }

      if (lidx < _lrg_map.max_lrg_id() && lrgs(lidx).reg() >= LRG::SPILL_REG) {
        Node *rdef = Reachblock[lrg2reach[lidx]];
        if (rdef) {
          spill->set_req(i, rdef);
        }
      }
    }
  }

  assert( spill->out_RegMask().is_UP(), "rematerialize to a reg" );
  // Rematerialized op is def->spilled+1
  set_was_spilled(spill);
  if( _spilled_once.test(def->_idx) )
    set_was_spilled(spill);

  insert_proj( b, insidx, spill, maxlrg++ );
#ifdef ASSERT
  // Increment the counter for this lrg
  splits.at_put(slidx, splits.at(slidx)+1);
#endif
  // See if the cloned def kills any flags, and copy those kills as well
  uint i = insidx+1;
  int found_projs = clone_projs( b, i, def, spill, maxlrg);
  if (found_projs > 0) {
    // Adjust the point where we go hi-pressure
    if (i <= b->_ihrp_index) {
      b->_ihrp_index += found_projs;
    }
    if (i <= b->_fhrp_index) {
      b->_fhrp_index += found_projs;
    }
  }

  return spill;
}
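// Illustrative example (hypothetical node, e.g. an x86-style loadConI; not
// taken from the original sources): if a spilling live range is defined by
// a rematerializable constant such as
//
//   loadConI dst=R1  #42      // single-def, no live-range inputs
//
// then instead of storing R1 to the stack and reloading it at each distant
// use, split_Rematerialize() clones the loadConI right next to the use, so
// the "spill" costs one cheap instruction and no memory traffic.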

//------------------------------is_high_pressure-------------------------------
// Function to compute whether or not this live range is "high pressure"
// in this block - whether it spills eagerly or not.
bool PhaseChaitin::is_high_pressure( Block *b, LRG *lrg, uint insidx ) {
  if( lrg->_was_spilled1 ) return true;
  // Forced spilling due to conflict? Then split only at binding uses
  // or defs, not for supposed capacity problems.
  // CNC - Turned off 7/8/99, causes too much spilling
  // if( lrg->_is_bound ) return false;

  // Use float pressure numbers for vectors.
  bool is_float_or_vector = lrg->_is_float || lrg->_is_vector;
  // Not yet reached the high-pressure cutoff point, so low pressure
  uint hrp_idx = is_float_or_vector ? b->_fhrp_index : b->_ihrp_index;
  if( insidx < hrp_idx ) return false;
  // Register pressure for the block as a whole depends on reg class
  int block_pres = is_float_or_vector ? b->_freg_pressure : b->_reg_pressure;
  // Bound live ranges will split at the binding points first;
  // Intermediate splits should assume the live range's register set
  // got "freed up" and that num_regs will become INT_PRESSURE.
  int bound_pres = is_float_or_vector ? FLOATPRESSURE : INTPRESSURE;
  // Effective register pressure limit.
  int lrg_pres = (lrg->get_invalid_mask_size() > lrg->num_regs())
    ? (lrg->get_invalid_mask_size() >> (lrg->num_regs()-1)) : bound_pres;
  // High pressure if block pressure requires more register freedom
  // than live range has.
  return block_pres >= lrg_pres;
}
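// Worked reading of the cutoff above (illustrative, no particular platform
// assumed): for a plain one-register integer LRG whose bad-color mask is no
// larger than num_regs, lrg_pres is simply bound_pres (INTPRESSURE), so past
// _ihrp_index the test reduces to "b->_reg_pressure >= INTPRESSURE", i.e.
// spill eagerly exactly when the block has run out of integer registers.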

//------------------------------prompt_use---------------------------------
// True if lidx is used before any real register is def'd in the block
bool PhaseChaitin::prompt_use( Block *b, uint lidx ) {
  if (lrgs(lidx)._was_spilled2) {
    return false;
  }

  // Scan block for 1st use.
  for( uint i = 1; i <= b->end_idx(); i++ ) {
    Node *n = b->_nodes[i];
    // Ignore PHI use, these can be up or down
    if (n->is_Phi()) {
      continue;
    }
    for (uint j = 1; j < n->req(); j++) {
      if (_lrg_map.find_id(n->in(j)) == lidx) {
        return true;          // Found 1st use!
      }
    }
    if (n->out_RegMask().is_NotEmpty()) {
      return false;
    }
  }
  return false;
}

//------------------------------Split--------------------------------------
//----------Split Routine----------
// ***** NEW SPLITTING HEURISTIC *****
// DEFS: If the DEF is in a High Register Pressure(HRP) Block, split there.
//       Else, no split unless there is a HRP block between a DEF and
//       one of its uses, and then split at the HRP block.
//
// USES: If USE is in HRP, split at use to leave main LRG on stack.
//       Else, hoist LRG back up to register only (ie - split is also DEF)
// We will compute a new maxlrg as we go.
uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
  NOT_PRODUCT( Compile::TracePhase t3("regAllocSplit", &_t_regAllocSplit, TimeCompiler); )

  // Free thread local resources used by this method on exit.
  ResourceMark rm(split_arena);

  uint bidx, pidx, slidx, insidx, inpidx, twoidx;
  uint non_phi = 1, spill_cnt = 0;
  Node **Reachblock;
  Node *n1, *n2, *n3;
  Node_List *defs,*phis;
  bool *UPblock;
  bool u1, u2, u3;
  Block *b, *pred;
  PhiNode *phi;
  GrowableArray<uint> lidxs(split_arena, maxlrg, 0, 0);

  // Array of counters to count splits per live range
  GrowableArray<uint> splits(split_arena, maxlrg, 0, 0);

#define NEW_SPLIT_ARRAY(type, size)\
  (type*) split_arena->allocate_bytes((size) * sizeof(type))

  //----------Setup Code----------
  // Create a convenient mapping from lrg numbers to reaches/leaves indices
  uint *lrg2reach = NEW_SPLIT_ARRAY(uint, maxlrg);
  // Keep track of DEFS & Phis for later passes
  defs = new Node_List();
  phis = new Node_List();
  // Gather info on which LRG's are spilling, and build maps
  for (bidx = 1; bidx < maxlrg; bidx++) {
    if (lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG) {
      assert(!lrgs(bidx).mask().is_AllStack(),"AllStack should color");
      lrg2reach[bidx] = spill_cnt;
      spill_cnt++;
      lidxs.append(bidx);
#ifdef ASSERT
      // Initialize the split counts to zero
      splits.append(0);
#endif
#ifndef PRODUCT
      if( PrintOpto && WizardMode && lrgs(bidx)._was_spilled1 )
        tty->print_cr("Warning, 2nd spill of L%d",bidx);
#endif
    }
  }

  // Create side arrays for propagating reaching defs info.
  // Each block needs a node pointer for each spilling live range for the
  // Def which is live into the block. Phi nodes handle multiple input
  // Defs by querying the output of their predecessor blocks and resolving
  // them to a single Def at the phi. The pointer is updated for each
  // Def in the block, and then becomes the output for the block when
  // processing of the block is complete. We also need to track whether
  // a Def is UP or DOWN. UP means that it should get a register (ie -
  // it is always in LRP regions), and DOWN means that it is probably
  // on the stack (ie - it crosses HRP regions).
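  // Shape of the side arrays (restating the comment above in code terms;
  // the index names are this file's own):
  //
  //   Reaches[pre_order][slidx] -> reaching def Node* for spill range slidx
  //                                at the current point in block pre_order
  //   UP[pre_order][slidx]      -> true if that reaching def is in a register
  //
  // lrg2reach[] compresses sparse live range numbers down to the dense
  // slidx indices used by both tables.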
  Node ***Reaches      = NEW_SPLIT_ARRAY( Node**, _cfg._num_blocks+1 );
  bool  **UP           = NEW_SPLIT_ARRAY( bool*, _cfg._num_blocks+1 );
  Node  **debug_defs   = NEW_SPLIT_ARRAY( Node*, spill_cnt );
  VectorSet **UP_entry = NEW_SPLIT_ARRAY( VectorSet*, spill_cnt );

  // Initialize Reaches & UP
  for( bidx = 0; bidx < _cfg._num_blocks+1; bidx++ ) {
    Reaches[bidx] = NEW_SPLIT_ARRAY( Node*, spill_cnt );
    UP[bidx]      = NEW_SPLIT_ARRAY( bool, spill_cnt );
    Node **Reachblock = Reaches[bidx];
    bool *UPblock     = UP[bidx];
    for( slidx = 0; slidx < spill_cnt; slidx++ ) {
      UPblock[slidx] = true;     // Assume they start in registers
      Reachblock[slidx] = NULL;  // Assume that no def is present
    }
  }

#undef NEW_SPLIT_ARRAY

  // Initialize to array of empty vectorsets
  for( slidx = 0; slidx < spill_cnt; slidx++ )
    UP_entry[slidx] = new VectorSet(split_arena);

  //----------PASS 1----------
  //----------Propagation & Node Insertion Code----------
  // Walk the Blocks in RPO for DEF & USE info
  for( bidx = 0; bidx < _cfg._num_blocks; bidx++ ) {

    if (C->check_node_count(spill_cnt, out_of_nodes)) {
      return 0;
    }

    b = _cfg._blocks[bidx];
    // Reaches & UP arrays for this block
    Reachblock = Reaches[b->_pre_order];
    UPblock    = UP[b->_pre_order];
    // Reset counter of start of non-Phi nodes in block
    non_phi = 1;
    //----------Block Entry Handling----------
    // Check for need to insert a new phi.
    // Cycle through this block's predecessors, collecting Reaches
    // info for each spilled LRG. If they are identical, no phi is
    // needed. If they differ, check for a phi, and insert if missing,
    // or update edges if present. Set current block's Reaches set to
    // be either the phi's or the reaching def, as appropriate.
    // If no Phi is needed, check if the LRG needs to spill on entry
    // to the block due to HRP.
    for( slidx = 0; slidx < spill_cnt; slidx++ ) {
      // Grab the live range number
      uint lidx = lidxs.at(slidx);
      // Do not bother splitting or putting in Phis for single-def
      // rematerialized live ranges. This happens a lot to constants
      // with long live ranges.
      if( lrgs(lidx).is_singledef() &&
          lrgs(lidx)._def->rematerialize() ) {
        // reset the Reaches & UP entries
        Reachblock[slidx] = lrgs(lidx)._def;
        UPblock[slidx] = true;
        // Record following instruction in case 'n' rematerializes and
        // kills flags
        Block *pred1 = _cfg._bbs[b->pred(1)->_idx];
        continue;
      }

      // Initialize needs_phi and needs_split
      bool needs_phi = false;
      bool needs_split = false;
      bool has_phi = false;
      // Walk the predecessor blocks to check inputs for that live range.
      // Grab predecessor block header
      n1 = b->pred(1);
      // Grab the appropriate reaching def info for inpidx
      pred = _cfg._bbs[n1->_idx];
      pidx = pred->_pre_order;
      Node **Ltmp = Reaches[pidx];
      bool  *Utmp = UP[pidx];
      n1 = Ltmp[slidx];
      u1 = Utmp[slidx];
      // Initialize node for saving type info
      n3 = n1;
      u3 = u1;

      // Compare inputs to see if a Phi is needed
      for( inpidx = 2; inpidx < b->num_preds(); inpidx++ ) {
        // Grab predecessor block headers
        n2 = b->pred(inpidx);
        // Grab the appropriate reaching def info for inpidx
        pred = _cfg._bbs[n2->_idx];
        pidx = pred->_pre_order;
        Ltmp = Reaches[pidx];
        Utmp = UP[pidx];
        n2 = Ltmp[slidx];
        u2 = Utmp[slidx];
        // For each LRG, decide if a phi is necessary
        if( n1 != n2 ) {
          needs_phi = true;
        }
        // See if the phi has mismatched inputs, UP vs. DOWN
        if( n1 && n2 && (u1 != u2) ) {
          needs_split = true;
        }
        // Move n2/u2 to n1/u1 for next iteration
        n1 = n2;
        u1 = u2;
        // Preserve a non-NULL predecessor for later type referencing
        if( (n3 == NULL) && (n2 != NULL) ){
          n3 = n2;
          u3 = u2;
        }
      }  // End for all potential Phi inputs

      // Check block for appropriate phinode & update edges
      for( insidx = 1; insidx <= b->end_idx(); insidx++ ) {
        n1 = b->_nodes[insidx];
        // bail if this is not a phi
        phi = n1->is_Phi() ? n1->as_Phi() : NULL;
        if( phi == NULL ) {
          // Keep track of index of first non-PhiNode instruction in block
          non_phi = insidx;
          // break out of the for loop as we have handled all phi nodes
          break;
        }
        // must be looking at a phi
        if (_lrg_map.find_id(n1) == lidxs.at(slidx)) {
          // found the necessary phi
          needs_phi = false;
          has_phi = true;
          // initialize the Reaches entry for this LRG
          Reachblock[slidx] = phi;
          break;
        }  // end if found correct phi
      }  // end for all phi's

      // If a phi is needed or exists, check for it
      if( needs_phi || has_phi ) {
        // add new phinode if one not already found
        if( needs_phi ) {
          // create a new phi node and insert it into the block;
          // type is taken from left over pointer to a predecessor
          assert(n3,"No non-NULL reaching DEF for a Phi");
          phi = new (C) PhiNode(b->head(), n3->bottom_type());
          // initialize the Reaches entry for this LRG
          Reachblock[slidx] = phi;

          // add node to block & node_to_block mapping
          insert_proj(b, insidx++, phi, maxlrg++);
          non_phi++;
          // Reset new phi's mapping to be the spilling live range
          _lrg_map.map(phi->_idx, lidx);
          assert(_lrg_map.find_id(phi) == lidx, "Bad update on Union-Find mapping");
        }  // end if not found correct phi
        // Here you have either found or created the Phi, so record it
        assert(phi != NULL,"Must have a Phi Node here");
        phis->push(phi);
        // PhiNodes should either force the LRG UP or DOWN depending
        // on its inputs and the register pressure in the Phi's block.
        UPblock[slidx] = true;  // Assume new DEF is UP
        // If entering a high-pressure area with no immediate use,
        // assume Phi is DOWN
        if( is_high_pressure( b, &lrgs(lidx), b->end_idx()) && !prompt_use(b,lidx) )
          UPblock[slidx] = false;
        // If we are not split up/down and all inputs are down, then we
        // are down
        if( !needs_split && !u3 )
          UPblock[slidx] = false;
      }  // end if phi is needed

      // Do not need a phi, so grab the reaching DEF
      else {
        // Grab predecessor block header
        n1 = b->pred(1);
        // Grab the appropriate reaching def info for k
        pred = _cfg._bbs[n1->_idx];
        pidx = pred->_pre_order;
        Node **Ltmp = Reaches[pidx];
        bool  *Utmp = UP[pidx];
        // reset the Reaches & UP entries
        Reachblock[slidx] = Ltmp[slidx];
        UPblock[slidx] = Utmp[slidx];
      }  // end else no Phi is needed
    }  // end for all spilling live ranges
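    // Decision sketch for the loop above (illustrative): a block with two
    // predecessors whose entries disagree, say
    //
    //   pred A: Reaches[A][slidx] = def1, UP[A][slidx] = true
    //   pred B: Reaches[B][slidx] = def2, UP[B][slidx] = false
    //
    // sets needs_phi (different defs) and needs_split (UP/DOWN mismatch),
    // so a Phi merges def1/def2 here and the mismatched input later gets a
    // use-side split when the Phi is patched in the phi-splitting pass.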
    // DEBUG
#ifndef PRODUCT
    if(trace_spilling()) {
      tty->print("\nBlock %d: ", b->_pre_order);
      tty->print("Reaching Definitions after Phi handling\n");
      for( uint x = 0; x < spill_cnt; x++ ) {
        tty->print("Spill Idx %d: UP %d: Node\n",x,UPblock[x]);
        if( Reachblock[x] )
          Reachblock[x]->dump();
        else
          tty->print("Undefined\n");
      }
    }
#endif

    //----------Non-Phi Node Splitting----------
    // Since phi-nodes have now been handled, the Reachblock array for this
    // block is initialized with the correct starting value for the defs which
    // reach non-phi instructions in this block. Thus, process non-phi
    // instructions normally, inserting SpillCopy nodes for all spill
    // locations.

    // Memoize any DOWN reaching definitions for use as DEBUG info
    for( insidx = 0; insidx < spill_cnt; insidx++ ) {
      debug_defs[insidx] = (UPblock[insidx]) ? NULL : Reachblock[insidx];
      if( UPblock[insidx] )     // Memoize UP decision at block start
        UP_entry[insidx]->set( b->_pre_order );
    }

    //----------Walk Instructions in the Block and Split----------
    // For all non-phi instructions in the block
    for( insidx = 1; insidx <= b->end_idx(); insidx++ ) {
      Node *n = b->_nodes[insidx];
      // Find the defining Node's live range index
      uint defidx = _lrg_map.find_id(n);
      uint cnt = n->req();

      if (n->is_Phi()) {
        // Skip phi nodes after removing dead copies.
        if (defidx < _lrg_map.max_lrg_id()) {
          // Check for useless Phis. These appear if we spill, then
          // coalesce away copies. Don't touch Phis in spilling live
          // ranges; they are busy getting modified in this pass.
          if( lrgs(defidx).reg() < LRG::SPILL_REG ) {
            uint i;
            Node *u = NULL;
            // Look for the Phi merging 2 unique inputs
            for( i = 1; i < cnt; i++ ) {
              // Ignore repeats and self
              if( n->in(i) != u && n->in(i) != n ) {
                // Found a unique input
                if( u != NULL ) // If it's the 2nd, bail out
                  break;
                u = n->in(i);   // Else record it
              }
            }
            assert( u, "at least 1 valid input expected" );
            if (i >= cnt) {    // Found one unique input
              assert(_lrg_map.find_id(n) == _lrg_map.find_id(u), "should be the same lrg");
              n->replace_by(u); // Then replace with unique input
              n->disconnect_inputs(NULL, C);
              b->_nodes.remove(insidx);
              insidx--;
              b->_ihrp_index--;
              b->_fhrp_index--;
            }
          }
        }
        continue;
      }
      assert( insidx > b->_ihrp_index ||
              (b->_reg_pressure < (uint)INTPRESSURE) ||
              b->_ihrp_index > 4000000 ||
              b->_ihrp_index >= b->end_idx() ||
              !b->_nodes[b->_ihrp_index]->is_Proj(), "" );
      assert( insidx > b->_fhrp_index ||
              (b->_freg_pressure < (uint)FLOATPRESSURE) ||
              b->_fhrp_index > 4000000 ||
              b->_fhrp_index >= b->end_idx() ||
              !b->_nodes[b->_fhrp_index]->is_Proj(), "" );

      // ********** Handle Crossing HRP Boundary **********
      if( (insidx == b->_ihrp_index) || (insidx == b->_fhrp_index) ) {
        for( slidx = 0; slidx < spill_cnt; slidx++ ) {
          // Check for need to split at HRP boundary - split if UP
          n1 = Reachblock[slidx];
          // bail out if no reaching DEF
          if( n1 == NULL ) continue;
          // bail out if live range is 'isolated' around inner loop
          uint lidx = lidxs.at(slidx);
          // If live range is currently UP
          if( UPblock[slidx] ) {
            // set location to insert spills at
            // SPLIT DOWN HERE - NO CISC SPILL
            if( is_high_pressure( b, &lrgs(lidx), insidx ) &&
                !n1->rematerialize() ) {
              // If there is already a valid stack definition available, use it
              if( debug_defs[slidx] != NULL ) {
                Reachblock[slidx] = debug_defs[slidx];
              }
              else {
                // Insert point is just past last use or def in the block
                int insert_point = insidx-1;
                while( insert_point > 0 ) {
                  Node *n = b->_nodes[insert_point];
                  // Hit top of block? Quit going backwards.
                  if (n->is_Phi()) {
                    break;
                  }
                  // Found a def? Better split after it.
                  if (_lrg_map.live_range_id(n) == lidx) {
                    break;
                  }
                  // Look for a use
                  uint i;
                  for( i = 1; i < n->req(); i++ ) {
                    if (_lrg_map.live_range_id(n->in(i)) == lidx) {
                      break;
                    }
                  }
                  // Found a use? Better split after it.
                  if (i < n->req()) {
                    break;
                  }
                  insert_point--;
                }
                uint orig_eidx = b->end_idx();
                maxlrg = split_DEF( n1, b, insert_point, maxlrg, Reachblock, debug_defs, splits, slidx);
                // If it wasn't split, bail.
                if (!maxlrg) {
                  return 0;
                }
                // Spill of NULL check mem op goes into the following block.
                if (b->end_idx() > orig_eidx) {
                  insidx++;
                }
              }
              // This is a new DEF, so update UP
              UPblock[slidx] = false;
#ifndef PRODUCT
              // DEBUG
              if( trace_spilling() ) {
                tty->print("\nNew Split DOWN DEF of Spill Idx ");
                tty->print("%d, UP %d:\n",slidx,false);
                n1->dump();
              }
#endif
            }
          }  // end if LRG is UP
        }  // end for all spilling live ranges
        assert( b->_nodes[insidx] == n, "got insidx set incorrectly" );
      }  // end if crossing HRP Boundary
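      // Illustrative trace of the boundary handling above (hypothetical
      // block): with a reaching def currently UP and pressure high at the
      // _ihrp_index, the backward scan stops at the last def or use of the
      // live range (or at the Phis), split_DEF() drops a SpillCopy just
      // after that point, and from here on the HRP region reads the DOWN
      // (stack) copy instead of holding a register.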

      // If the LRG index is oob, then this is a new spillcopy, skip it.
      if (defidx >= _lrg_map.max_lrg_id()) {
        continue;
      }
      LRG &deflrg = lrgs(defidx);
      uint copyidx = n->is_Copy();
      // Remove coalesced copy from CFG
      if (copyidx && defidx == _lrg_map.live_range_id(n->in(copyidx))) {
        n->replace_by( n->in(copyidx) );
        n->set_req( copyidx, NULL );
        b->_nodes.remove(insidx--);
        b->_ihrp_index--;       // Adjust the point where we go hi-pressure
        b->_fhrp_index--;
        continue;
      }

#define DERIVED 0

      // ********** Handle USES **********
      bool nullcheck = false;
      // Implicit null checks never use the spilled value
      if( n->is_MachNullCheck() )
        nullcheck = true;
      if( !nullcheck ) {
        // Search all inputs for a Spill-USE
        JVMState* jvms = n->jvms();
        uint oopoff = jvms ? jvms->oopoff() : cnt;
        uint old_last = cnt - 1;
        for( inpidx = 1; inpidx < cnt; inpidx++ ) {
          // Derived/base pairs may be added to our inputs during this loop.
          // If inpidx > old_last, then one of these new inputs is being
          // handled. Skip the derived part of the pair, but process
          // the base like any other input.
          if (inpidx > old_last && ((inpidx - oopoff) & 1) == DERIVED) {
            continue;  // skip derived_debug added below
          }
          // Get lidx of input
          uint useidx = _lrg_map.find_id(n->in(inpidx));
          // Not a brand-new split, and it is a spill use
          if (useidx < _lrg_map.max_lrg_id() && lrgs(useidx).reg() >= LRG::SPILL_REG) {
            // Check for valid reaching DEF
            slidx = lrg2reach[useidx];
            Node *def = Reachblock[slidx];
            assert( def != NULL, "Using Undefined Value in Split()\n");

            // (+++) %%%% remove this in favor of pre-pass in matcher.cpp
            // monitor references do not care where they live, so just hook
            if ( jvms && jvms->is_monitor_use(inpidx) ) {
              // The effect of this clone is to drop the node out of the block,
              // so that the allocator does not see it anymore, and therefore
              // does not attempt to assign it a register.
              def = clone_node(def, b, C);
              if (def == NULL || C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {
                return 0;
              }
              _lrg_map.extend(def->_idx, 0);
              _cfg._bbs.map(def->_idx,b);
              n->set_req(inpidx, def);
              continue;
            }

            // Rematerializable? Then clone def at use site instead
            // of store/load.
            if( def->rematerialize() ) {
              int old_size = b->_nodes.size();
              def = split_Rematerialize( def, b, insidx, maxlrg, splits, slidx, lrg2reach, Reachblock, true );
              if( !def ) return 0; // Bail out
              insidx += b->_nodes.size()-old_size;
            }

            MachNode *mach = n->is_Mach() ? n->as_Mach() : NULL;
            // Base pointers and oopmap references do not care where they live.
            if ((inpidx >= oopoff) ||
                (mach && mach->ideal_Opcode() == Op_AddP && inpidx == AddPNode::Base)) {
              if (def->rematerialize() && lrgs(useidx)._was_spilled2) {
                // This def has been rematerialized a couple of times without
                // progress. It doesn't care if it lives UP or DOWN, so
                // spill it down now.
                maxlrg = split_USE(def,b,n,inpidx,maxlrg,false,false,splits,slidx);
                // If it wasn't split, bail.
                if (!maxlrg) {
                  return 0;
                }
                insidx++;  // Reset iterator to skip USE side split
              } else {
                // Just hook the def edge
                n->set_req(inpidx, def);
              }

              if (inpidx >= oopoff) {
                // After oopoff, we have derived/base pairs. We must mention all
                // derived pointers here as derived/base pairs for GC. If the
                // derived value is spilling and we have a copy both in Reachblock
                // (called here 'def') and debug_defs[slidx] we need to mention
                // both in derived/base pairs or kill one.
                Node *derived_debug = debug_defs[slidx];
                if( ((inpidx - oopoff) & 1) == DERIVED && // derived vs base?
                    mach && mach->ideal_Opcode() != Op_Halt &&
                    derived_debug != NULL &&
                    derived_debug != def ) { // Actual 2nd value appears
                  // We have already set 'def' as a derived value.
                  // Also set debug_defs[slidx] as a derived value.
                  uint k;
                  for( k = oopoff; k < cnt; k += 2 )
                    if( n->in(k) == derived_debug )
                      break;    // Found an instance of debug derived
                  if( k == cnt ) {// No instance of debug_defs[slidx]
                    // Add a derived/base pair to cover the debug info.
                    // We have to process the added base later since it is not
                    // handled yet at this point but skip derived part.
                    assert(((n->req() - oopoff) & 1) == DERIVED,
                           "must match skip condition above");
                    n->add_req( derived_debug );   // this will be skipped above
                    n->add_req( n->in(inpidx+1) ); // this will be processed
                    // Increment cnt to handle added input edges on
                    // subsequent iterations.
                    cnt += 2;
                  }
                }
              }
              continue;
            }
            // Special logic for DEBUG info
            if( jvms && b->_freq > BLOCK_FREQUENCY(0.5) ) {
              uint debug_start = jvms->debug_start();
              // If this is debug info use & there is a reaching DOWN def
              if ((debug_start <= inpidx) && (debug_defs[slidx] != NULL)) {
                assert(inpidx < oopoff, "handle only debug info here");
                // Just hook it in & move on
                n->set_req(inpidx, debug_defs[slidx]);
                // (Note that this can make two sides of a split live at the
                // same time: The debug def on stack, and another def in a
                // register. The GC needs to know about both of them, but any
                // derived pointers after oopoff will refer to only one of the
                // two defs and the GC would therefore miss the other. Thus
                // this hack is only allowed for debug info which is Java state
                // and therefore never a derived pointer.)
                continue;
              }
            }
            // Grab register mask info
            const RegMask &dmask = def->out_RegMask();
            const RegMask &umask = n->in_RegMask(inpidx);
            bool is_vect = RegMask::is_vector(def->ideal_reg());
            assert(inpidx < oopoff, "cannot use-split oop map info");

            bool dup = UPblock[slidx];
            bool uup = umask.is_UP();

            // Need special logic to handle bound USES. Insert a split at this
            // bound use if we can't rematerialize the def, or if we need the
            // split to form a misaligned pair.
            if( !umask.is_AllStack() &&
                (int)umask.Size() <= lrgs(useidx).num_regs() &&
                (!def->rematerialize() ||
                 (!is_vect && umask.is_misaligned_pair())) ) {
              // These need a Split regardless of overlap or pressure
              // SPLIT - NO DEF - NO CISC SPILL
              maxlrg = split_USE(def,b,n,inpidx,maxlrg,dup,false, splits,slidx);
              // If it wasn't split, bail.
              if (!maxlrg) {
                return 0;
              }
              insidx++;  // Reset iterator to skip USE side split
              continue;
            }

            if (UseFPUForSpilling && n->is_MachCall() && !uup && !dup ) {
              // The use at the call can force the def down so insert
              // a split before the use to allow the def more freedom.
              maxlrg = split_USE(def,b,n,inpidx,maxlrg,dup,false, splits,slidx);
              // If it wasn't split, bail.
              if (!maxlrg) {
                return 0;
              }
              insidx++;  // Reset iterator to skip USE side split
              continue;
            }

            // Here is the logic chart which describes USE Splitting:
            // 0 = false or DOWN, 1 = true or UP
            //
            // Overlap | DEF | USE | Action
            //-------------------------------------------------------
            //    0    |  0  |  0  | Copy - mem -> mem
            //    0    |  0  |  1  | Split-UP - Check HRP
            //    0    |  1  |  0  | Split-DOWN - Debug Info?
            //    0    |  1  |  1  | Copy - reg -> reg
            //    1    |  0  |  0  | Reset Input Edge (no Split)
            //    1    |  0  |  1  | Split-UP - Check HRP
            //    1    |  1  |  0  | Split-DOWN - Debug Info?
            //    1    |  1  |  1  | Reset Input Edge (no Split)
            //
            // So, if (dup == uup), then the overlap test determines the
            // action, with true being no split, and false being copy. Else,
            // if DEF is DOWN, Split-UP, and check HRP to decide on
            // resetting DEF. Finally if DEF is UP, Split-DOWN, with
            // special handling for Debug Info.
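            // Worked row (illustrative): dup==1 (DEF in a register) and
            // uup==0 (USE wants the stack) selects the Split-DOWN rows above
            // regardless of overlap: the code below copies the value DOWN
            // just before this use, patching the edge instead when the use
            // is itself a suitable SpillCopy, and remembers a debug-info
            // split in debug_defs[] so later debug uses can share it.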
            if( dup == uup ) {
              if( dmask.overlap(umask) ) {
                // Both are either up or down, and there is overlap, No Split
                n->set_req(inpidx, def);
              }
              else {  // Both are either up or down, and there is no overlap
                if( dup ) {  // If UP, reg->reg copy
                  // COPY ACROSS HERE - NO DEF - NO CISC SPILL
                  maxlrg = split_USE(def,b,n,inpidx,maxlrg,false,false, splits,slidx);
                  // If it wasn't split, bail.
                  if (!maxlrg) {
                    return 0;
                  }
                  insidx++;  // Reset iterator to skip USE side split
                }
                else {       // DOWN, mem->mem copy
                  // COPY UP & DOWN HERE - NO DEF - NO CISC SPILL
                  // First Split-UP to move value into Register
                  uint def_ideal = def->ideal_reg();
                  const RegMask* tmp_rm = Matcher::idealreg2regmask[def_ideal];
                  Node *spill = new (C) MachSpillCopyNode(def, dmask, *tmp_rm);
                  insert_proj( b, insidx, spill, maxlrg );
                  // Then Split-DOWN as if previous Split was DEF
                  maxlrg = split_USE(spill,b,n,inpidx,maxlrg,false,false, splits,slidx);
                  // If it wasn't split, bail.
                  if (!maxlrg) {
                    return 0;
                  }
                  insidx += 2;  // Reset iterator to skip USE side splits
                }
              }  // End else no overlap
            }  // End if dup == uup
            // dup != uup, so check dup for direction of Split
            else {
              if( dup ) {  // If UP, Split-DOWN and check Debug Info
                // If this node is already a SpillCopy, just patch the edge,
                // except the case of spilling to stack.
                if( n->is_SpillCopy() ) {
                  RegMask tmp_rm(umask);
                  tmp_rm.SUBTRACT(Matcher::STACK_ONLY_mask);
                  if( dmask.overlap(tmp_rm) ) {
                    if( def != n->in(inpidx) ) {
                      n->set_req(inpidx, def);
                    }
                    continue;
                  }
                }
                // COPY DOWN HERE - NO DEF - NO CISC SPILL
                maxlrg = split_USE(def,b,n,inpidx,maxlrg,false,false, splits,slidx);
                // If it wasn't split, bail.
                if (!maxlrg) {
                  return 0;
                }
                insidx++;  // Reset iterator to skip USE side split
                // Check for debug-info split. Capture it for later
                // debug splits of the same value
                if (jvms && jvms->debug_start() <= inpidx && inpidx < oopoff)
                  debug_defs[slidx] = n->in(inpidx);

              }
              else {  // DOWN, Split-UP and check register pressure
                if( is_high_pressure( b, &lrgs(useidx), insidx ) ) {
                  // COPY UP HERE - NO DEF - CISC SPILL
                  maxlrg = split_USE(def,b,n,inpidx,maxlrg,true,true, splits,slidx);
                  // If it wasn't split, bail.
                  if (!maxlrg) {
                    return 0;
                  }
                  insidx++;  // Reset iterator to skip USE side split
                } else {  // LRP
                  // COPY UP HERE - WITH DEF - NO CISC SPILL
                  maxlrg = split_USE(def,b,n,inpidx,maxlrg,true,false, splits,slidx);
                  // If it wasn't split, bail.
                  if (!maxlrg) {
                    return 0;
                  }
                  // Flag this lift-up in a low-pressure block as
                  // already-spilled, so if it spills again it will
                  // spill hard (instead of not spilling hard and
                  // coalescing away).
                  set_was_spilled(n->in(inpidx));
                  // Since this is a new DEF, update Reachblock & UP
                  Reachblock[slidx] = n->in(inpidx);
                  UPblock[slidx] = true;
                  insidx++;  // Reset iterator to skip USE side split
                }
              }  // End else DOWN
            }  // End dup != uup
          }  // End if Spill USE
        }  // End For All Inputs
      }  // End If not nullcheck

      // ********** Handle DEFS **********
      // DEFS either Split DOWN in HRP regions or when the LRG is bound, or
      // just reset the Reaches info in LRP regions. DEFS must always update
      // UP info.
      if( deflrg.reg() >= LRG::SPILL_REG ) {    // Spilled?
        uint slidx = lrg2reach[defidx];
        // Add to defs list for later assignment of new live range number
        defs->push(n);
        // Set a flag on the Node indicating it has already spilled.
        // Only do it for capacity spills, not conflict spills.
        if( !deflrg._direct_conflict )
          set_was_spilled(n);
        assert(!n->is_Phi(),"Cannot insert Phi into DEFS list");
        // Grab UP info for DEF
        const RegMask &dmask = n->out_RegMask();
        bool defup = dmask.is_UP();
        int ireg = n->ideal_reg();
        bool is_vect = RegMask::is_vector(ireg);
        // Only split at Def if this is a HRP block or bound (and spilled once)
        if( !n->rematerialize() &&
            (((dmask.is_bound(ireg) || (!is_vect && dmask.is_misaligned_pair())) &&
              (deflrg._direct_conflict || deflrg._must_spill)) ||
             // Check for LRG being up in a register and we are inside a high
             // pressure area. Spill it down immediately.
             (defup && is_high_pressure(b,&deflrg,insidx))) ) {
          assert( !n->rematerialize(), "" );
          assert( !n->is_SpillCopy(), "" );
          // Do a split at the def site.
          maxlrg = split_DEF( n, b, insidx, maxlrg, Reachblock, debug_defs, splits, slidx );
          // If it wasn't split, bail.
          if (!maxlrg) {
            return 0;
          }
          // Split DEF's Down
          UPblock[slidx] = 0;
#ifndef PRODUCT
          // DEBUG
          if( trace_spilling() ) {
            tty->print("\nNew Split DOWN DEF of Spill Idx ");
            tty->print("%d, UP %d:\n",slidx,false);
            n->dump();
          }
#endif
        }
        else {  // Neither bound nor HRP, must be LRP
          // otherwise, just record the def
          Reachblock[slidx] = n;
          // UP should come from the outRegmask() of the DEF
          UPblock[slidx] = defup;
          // Update debug list of reaching down definitions, kill if DEF is UP
          debug_defs[slidx] = defup ? NULL : n;
#ifndef PRODUCT
          // DEBUG
          if( trace_spilling() ) {
            tty->print("\nNew DEF of Spill Idx ");
            tty->print("%d, UP %d:\n",slidx,defup);
            n->dump();
          }
#endif
        }  // End else LRP
      }  // End if spill def

      // ********** Split Left Over Mem-Mem Moves **********
      // Check for mem-mem copies and split them now. Do not do this
      // to copies about to be spilled; they will be Split shortly.
      if (copyidx) {
        Node *use = n->in(copyidx);
        uint useidx = _lrg_map.find_id(use);
        if (useidx < _lrg_map.max_lrg_id() &&   // This is not a new split
            OptoReg::is_stack(deflrg.reg()) &&
            deflrg.reg() < LRG::SPILL_REG ) {   // And DEF is from stack
          LRG &uselrg = lrgs(useidx);
          if( OptoReg::is_stack(uselrg.reg()) &&
              uselrg.reg() < LRG::SPILL_REG &&  // USE is from stack
              deflrg.reg() != uselrg.reg() ) {  // Not trivially removed
            uint def_ideal_reg = n->bottom_type()->ideal_reg();
            const RegMask &def_rm = *Matcher::idealreg2regmask[def_ideal_reg];
            const RegMask &use_rm = n->in_RegMask(copyidx);
            if( def_rm.overlap(use_rm) && n->is_SpillCopy() ) {  // Bug 4707800, 'n' may be a storeSSL
              if (C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {  // Check when generating nodes
                return 0;
              }
              Node *spill = new (C) MachSpillCopyNode(use,use_rm,def_rm);
              n->set_req(copyidx,spill);
              n->as_MachSpillCopy()->set_in_RegMask(def_rm);
              // Put the spill just before the copy
              insert_proj( b, insidx++, spill, maxlrg++ );
            }
          }
        }
      }
    }  // End For All Instructions in Block - Non-PHI Pass

    // Check if each LRG is live out of this block so as not to propagate
    // beyond the last use of a LRG.
    for( slidx = 0; slidx < spill_cnt; slidx++ ) {
      uint defidx = lidxs.at(slidx);
      IndexSet *liveout = _live->live(b);
      if( !liveout->member(defidx) ) {
#ifdef ASSERT
        // The index defidx is not live. Check the liveout array to ensure that
        // it contains no members which compress to defidx. Finding such an
        // instance may be a case to add liveout adjustment in compress_uf_map().
        // See 5063219.
        uint member;
        IndexSetIterator isi(liveout);
        while ((member = isi.next()) != 0) {
          assert(defidx != _lrg_map.find_const(member), "Live out member has not been compressed");
        }
#endif
        Reachblock[slidx] = NULL;
      } else {
        assert(Reachblock[slidx] != NULL,"No reaching definition for liveout value");
      }
    }
#ifndef PRODUCT
    if( trace_spilling() )
      b->dump();
#endif
  }  // End For All Blocks

  //----------PASS 2----------
  // Reset all DEF live range numbers here
  for( insidx = 0; insidx < defs->size(); insidx++ ) {
    // Grab the def
    n1 = defs->at(insidx);
    // Set new lidx for DEF
    new_lrg(n1, maxlrg++);
  }
  //----------Phi Node Splitting----------
  // Clean up a phi here, and assign a new live range number.
  // Cycle through this block's predecessors, collecting Reaches
  // info for each spilled LRG and update edges.
  // Walk the phis list to patch inputs, split phis, and name phis
  uint lrgs_before_phi_split = maxlrg;
  for( insidx = 0; insidx < phis->size(); insidx++ ) {
    Node *phi = phis->at(insidx);
    assert(phi->is_Phi(),"This list must only contain Phi Nodes");
    Block *b = _cfg._bbs[phi->_idx];
    // Grab the live range number
    uint lidx = _lrg_map.find_id(phi);
    uint slidx = lrg2reach[lidx];
    // Update node to lidx map
    new_lrg(phi, maxlrg++);
    // Get PASS1's up/down decision for the block.
    int phi_up = !!UP_entry[slidx]->test(b->_pre_order);

    // Force down if double-spilling live range
    if( lrgs(lidx)._was_spilled1 )
      phi_up = false;

    // When splitting a Phi we can split it normally or "inverted".
    // An inverted split makes the splits target the Phi's UP/DOWN
    // sense inverted; then the Phi is followed by a final def-side
    // split to invert back. It changes which blocks the spill code
    // goes in.

    // Walk the predecessor blocks and assign the reaching def to the Phi.
    // Split Phi nodes by placing USE side splits wherever the reaching
    // DEF has the wrong UP/DOWN value.
    for( uint i = 1; i < b->num_preds(); i++ ) {
      // Get predecessor block pre-order number
      Block *pred = _cfg._bbs[b->pred(i)->_idx];
      pidx = pred->_pre_order;
      // Grab reaching def
      Node *def = Reaches[pidx][slidx];
      assert( def, "must have reaching def" );
      // If input up/down sense and reg-pressure DISagree
      if (def->rematerialize() && contains_no_live_range_input(def)) {
        // Place the rematerialized node above any MSCs created during
        // phi node splitting. end_idx points at the insertion point,
        // so look at the node before it.
        int insert = pred->end_idx();
        while (insert >= 1 &&
               pred->_nodes[insert - 1]->is_SpillCopy() &&
               _lrg_map.find(pred->_nodes[insert - 1]) >= lrgs_before_phi_split) {
          insert--;
        }
        def = split_Rematerialize(def, pred, insert, maxlrg, splits, slidx, lrg2reach, Reachblock, false);
        if (!def) {
          return 0;  // Bail out
        }
      }
      // Update the Phi's input edge array
      phi->set_req(i,def);
      // Grab the UP/DOWN sense for the input
      u1 = UP[pidx][slidx];
      if( u1 != (phi_up != 0)) {
        maxlrg = split_USE(def, b, phi, i, maxlrg, !u1, false, splits,slidx);
        // If it wasn't split, bail.
        if (!maxlrg) {
          return 0;
        }
      }
    }  // End for all inputs to the Phi
  }  // End for all Phi Nodes
  // Update _maxlrg to save Union asserts
  _lrg_map.set_max_lrg_id(maxlrg);


  //----------PASS 3----------
  // Pass over all Phi's to union the live ranges
  for( insidx = 0; insidx < phis->size(); insidx++ ) {
    Node *phi = phis->at(insidx);
    assert(phi->is_Phi(),"This list must only contain Phi Nodes");
    // Walk all inputs to Phi and Union input live range with Phi live range
    for( uint i = 1; i < phi->req(); i++ ) {
      // Grab the input node
      Node *n = phi->in(i);
      assert(n, "node should exist");
      uint lidx = _lrg_map.find(n);
      uint pidx = _lrg_map.find(phi);
      if (lidx < pidx) {
        Union(n, phi);
      }
      else if(lidx > pidx) {
        Union(phi, n);
      }
    }  // End for all inputs to the Phi Node
  }  // End for all Phi Nodes
  // Now union all two-address instructions
  for (insidx = 0; insidx < defs->size(); insidx++) {
    // Grab the def
    n1 = defs->at(insidx);
    // Set new lidx for DEF & handle 2-addr instructions
    if (n1->is_Mach() && ((twoidx = n1->as_Mach()->two_adr()) != 0)) {
      assert(_lrg_map.find(n1->in(twoidx)) < maxlrg,"Assigning bad live range index");
      // Union the input and output live ranges
      uint lr1 = _lrg_map.find(n1);
      uint lr2 = _lrg_map.find(n1->in(twoidx));
      if (lr1 < lr2) {
        Union(n1, n1->in(twoidx));
      }
      else if (lr1 > lr2) {
        Union(n1->in(twoidx), n1);
      }
    }  // End if two address
  }  // End for all defs
  // DEBUG
#ifdef ASSERT
  // Validate all live range index assignments
  for (bidx = 0; bidx < _cfg._num_blocks; bidx++) {
    b = _cfg._blocks[bidx];
    for (insidx = 0; insidx <= b->end_idx(); insidx++) {
      Node *n = b->_nodes[insidx];
      uint defidx = _lrg_map.find(n);
      assert(defidx < _lrg_map.max_lrg_id(), "Bad live range index in Split");
      assert(defidx < maxlrg,"Bad live range index in Split");
    }
  }
  // Issue a warning if splitting made no progress
  int noprogress = 0;
  for (slidx = 0; slidx < spill_cnt; slidx++) {
    if (PrintOpto && WizardMode && splits.at(slidx) == 0) {
      tty->print_cr("Failed to split live range %d", lidxs.at(slidx));
      //BREAKPOINT;
    }
    else {
      noprogress++;
    }
  }
  if(!noprogress) {
    tty->print_cr("Failed to make progress in Split");
    //BREAKPOINT;
  }
#endif
  // Return updated count of live ranges
  return maxlrg;
}