/*
 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/chaitin.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"

//------------------------------Split--------------------------------------
// Walk the graph in RPO and for each lrg which spills, propagate reaching
// definitions.  During propagation, split the live range around regions of
// High Register Pressure (HRP).  If a Def is in a region of Low Register
// Pressure (LRP), it will not get spilled until we encounter a region of
// HRP between it and one of its uses.  We will spill at the transition
// point between LRP and HRP.  Uses in the HRP region will use the spilled
// Def.  The first Use outside the HRP region will generate a SpillCopy to
// hoist the live range back up into a register, and all subsequent uses
// will use that new Def until another HRP region is encountered.  Defs in
// HRP regions will get trailing SpillCopies to push the LRG down into the
// stack immediately.
//
// As a side effect, unlink from (hence make dead) coalesced copies.
//

static const char out_of_nodes[] = "out of nodes during split";

//------------------------------get_spillcopy_wide-----------------------------
// Get a SpillCopy node with wide-enough masks.  Use the 'wide-mask', the
// wide ideal-register spill-mask if possible.  If the 'wide-mask' does
// not cover the input (or output), use the input (or output) mask instead.
Node *PhaseChaitin::get_spillcopy_wide( Node *def, Node *use, uint uidx ) {
  // If ideal reg doesn't exist we've got a bad schedule happening
  // that is forcing us to spill something that isn't spillable.
  // Bail rather than abort
  int ireg = def->ideal_reg();
  if( ireg == 0 || ireg == Op_RegFlags ) {
    assert(false, "attempted to spill a non-spillable item");
    C->record_method_not_compilable("attempted to spill a non-spillable item");
    return NULL;
  }
  if (C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {
    return NULL;
  }
  const RegMask *i_mask = &def->out_RegMask();
  const RegMask *w_mask = C->matcher()->idealreg2spillmask[ireg];
  const RegMask *o_mask = use ? &use->in_RegMask(uidx) : w_mask;
  const RegMask *w_i_mask = w_mask->overlap( *i_mask ) ? w_mask : i_mask;
  const RegMask *w_o_mask;

  int num_regs = RegMask::num_registers(ireg);
  bool is_vect = RegMask::is_vector(ireg);
  if( w_mask->overlap( *o_mask ) &&       // Overlap AND
      ((num_regs == 1)                    // Single use or aligned
        || is_vect                        // or vector
        || !is_vect && o_mask->is_aligned_pairs()) ) {
    assert(!is_vect || o_mask->is_aligned_sets(num_regs), "vectors are aligned");
    // Don't come here for mis-aligned doubles
    w_o_mask = w_mask;
  } else {                      // wide ideal mask does not overlap with o_mask
    // Mis-aligned doubles come here and XMM->FPR moves on x86.
    w_o_mask = o_mask;          // Must target desired registers
    // Does the ideal-reg-mask overlap with o_mask?  I.e., can I use
    // a reg-reg move or do I need a trip across register classes
    // (and thus through memory)?
    if( !C->matcher()->idealreg2regmask[ireg]->overlap( *o_mask) && o_mask->is_UP() )
      // Here we assume a trip through memory is required.
      w_i_mask = &C->FIRST_STACK_mask();
  }
  return new (C) MachSpillCopyNode( def, *w_i_mask, *w_o_mask );
}
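// Illustration of the mask choice above: if the def lives in one register
// class and the use demands another with no registers in common (e.g. an
// XMM-resident double feeding an x87/FPR-only operand on x86, per the
// comment in the else-branch), idealreg2regmask[ireg] cannot overlap
// o_mask, so the copy's input mask is widened to FIRST_STACK_mask() and
// the value travels through a stack slot instead of a reg-reg move.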
//------------------------------insert_proj------------------------------------
// Insert the spill at chosen location.  Skip over any intervening Proj's or
// Phis.  Skip over a CatchNode and projs, inserting in the fall-through block
// instead.  Update high-pressure indices.  Create a new live range.
void PhaseChaitin::insert_proj( Block *b, uint i, Node *spill, uint maxlrg ) {
  // Skip intervening ProjNodes.  Do not insert between a ProjNode and
  // its definer.
  while( i < b->number_of_nodes() &&
         (b->get_node(i)->is_Proj() ||
          b->get_node(i)->is_Phi() ) )
    i++;

  // Do not insert between a call and its Catch
  if( b->get_node(i)->is_Catch() ) {
    // Put the instruction at the top of the fall-thru block.
    // Find the fall-thru projection
    while( 1 ) {
      const CatchProjNode *cp = b->get_node(++i)->as_CatchProj();
      if( cp->_con == CatchProjNode::fall_through_index )
        break;
    }
    int sidx = i - b->end_idx()-1;
    b = b->_succs[sidx];        // Switch to successor block
    i = 1;                      // Right at start of block
  }

  b->insert_node(spill, i);            // Insert node in block
  _cfg.map_node_to_block(spill, b);    // Update node->block mapping to reflect the insertion
  // Adjust the point where we go hi-pressure
  if( i <= b->_ihrp_index ) b->_ihrp_index++;
  if( i <= b->_fhrp_index ) b->_fhrp_index++;

  // Assign a new Live Range Number to the SpillCopy and grow
  // the node->live range mapping.
  new_lrg(spill,maxlrg);
}
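// Placement discipline for the two split helpers below: a def-side split
// must be inserted *after* the defining instruction (split_DEF passes
// loc+1 to insert_proj), while a use-side split is inserted *before* the
// using instruction, keeping the new SpillCopy's live range as short as
// possible in both directions.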
//------------------------------split_DEF--------------------------------------
// There are four categories of Split; UP/DOWN x DEF/USE
// Only three of these really occur as DOWN/USE will always color
// Any Split with a DEF cannot CISC-Spill now.  Thus we need
// two helper routines, one for Split DEFS (insert after instruction),
// one for Split USES (insert before instruction).  DEF insertion
// happens inside Split, where the Leaveblock array is updated.
uint PhaseChaitin::split_DEF( Node *def, Block *b, int loc, uint maxlrg, Node **Reachblock, Node **debug_defs, GrowableArray<uint> splits, int slidx ) {
#ifdef ASSERT
  // Increment the counter for this lrg
  splits.at_put(slidx, splits.at(slidx)+1);
#endif
  // If we are spilling the memory op for an implicit null check, at the
  // null check location (ie - null check is in HRP block) we need to do
  // the null-check first, then spill-down in the following block.
  // (The implicit_null_check function ensures the use is also dominated
  // by the branch-not-taken block.)
  Node *be = b->end();
  if( be->is_MachNullCheck() && be->in(1) == def && def == b->get_node(loc)) {
    // Spill goes in the branch-not-taken block
    b = b->_succs[b->get_node(b->end_idx()+1)->Opcode() == Op_IfTrue];
    loc = 0;                    // Just past the Region
  }
  assert( loc >= 0, "must insert past block head" );

  // Get a def-side SpillCopy
  Node *spill = get_spillcopy_wide(def,NULL,0);
  // Did we fail to split?  Then bail
  if (!spill) {
    return 0;
  }

  // Insert the spill at chosen location
  insert_proj( b, loc+1, spill, maxlrg++);

  // Insert new node into Reaches array
  Reachblock[slidx] = spill;
  // Update debug list of reaching down definitions by adding this one
  debug_defs[slidx] = spill;

  // return updated count of live ranges
  return maxlrg;
}

//------------------------------split_USE--------------------------------------
// Splits at uses can involve redefining the LRG, so no CISC Spilling there.
// Debug uses want to know if def is already stack enabled.
uint PhaseChaitin::split_USE( Node *def, Block *b, Node *use, uint useidx, uint maxlrg, bool def_down, bool cisc_sp, GrowableArray<uint> splits, int slidx ) {
#ifdef ASSERT
  // Increment the counter for this lrg
  splits.at_put(slidx, splits.at(slidx)+1);
#endif

  // Some setup stuff for handling debug node uses
  JVMState* jvms = use->jvms();
  uint debug_start = jvms ? jvms->debug_start() : 999999;
  uint debug_end   = jvms ? jvms->debug_end()   : 999999;

  //-------------------------------------------
  // Check for use of debug info
  if (useidx >= debug_start && useidx < debug_end) {
    // Actually it's perfectly legal for constant debug info to appear, just
    // unlikely.  In this case the optimizer left a ConI of a 4 as both
    // inputs to a Phi with only a debug use.  It's a single-def live range
    // of a rematerializable value.  The live range spills, rematerializes
    // and now the ConI directly feeds into the debug info.
    // assert(!def->is_Con(), "constant debug info already constructed directly");

    // Special split handling for Debug Info
    // If DEF is DOWN, just hook the edge and return
    // If DEF is UP, Split it DOWN for this USE.
    if( def->is_Mach() ) {
      if( def_down ) {
        // DEF is DOWN, so connect USE directly to the DEF
        use->set_req(useidx, def);
      } else {
        // Block and index where the use occurs.
        Block *b = _cfg.get_block_for_node(use);
        // Put the clone just prior to use
        int bindex = b->find_node(use);
        // DEF is UP, so must copy it DOWN and hook in USE
        // Insert SpillCopy before the USE, which uses DEF as its input,
        // and defs a new live range, which is used by this node.
        Node *spill = get_spillcopy_wide(def,use,useidx);
        // did we fail to split?
        if (!spill) {
          // Bail
          return 0;
        }
        // insert into basic block
        insert_proj( b, bindex, spill, maxlrg++ );
        // Use the new split
        use->set_req(useidx,spill);
      }
      // No further split handling needed for this use
      return maxlrg;
    }  // End special splitting for debug info live range
  }  // If debug info

  // CISC-SPILLING
  // Finally, check to see if USE is CISC-Spillable, and if so,
  // gather_lrg_masks will add the flags bit to its mask, and
  // no use side copy is needed.  This frees up the live range
  // register choices without causing copy coalescing, etc.
  if( UseCISCSpill && cisc_sp ) {
    int inp = use->cisc_operand();
    if( inp != AdlcVMDeps::Not_cisc_spillable )
      // Convert operand number to edge index number
      inp = use->as_Mach()->operand_index(inp);
    if( inp == (int)useidx ) {
      use->set_req(useidx, def);
#ifndef PRODUCT
      if( TraceCISCSpill ) {
        tty->print("  set_split: ");
        use->dump();
      }
#endif
      return maxlrg;
    }
  }

  //-------------------------------------------
  // Insert a Copy before the use

  // Block and index where the use occurs.
  int bindex;
  // Phi input spill-copys belong at the end of the prior block
  if( use->is_Phi() ) {
    b = _cfg.get_block_for_node(b->pred(useidx));
    bindex = b->end_idx();
  } else {
    // Put the clone just prior to use
    bindex = b->find_node(use);
  }

  Node *spill = get_spillcopy_wide( def, use, useidx );
  if( !spill ) return 0;        // Bailed out
  // Insert SpillCopy before the USE, which uses the reaching DEF as
  // its input, and defs a new live range, which is used by this node.
  insert_proj( b, bindex, spill, maxlrg++ );
  // Use the spill/clone
  use->set_req(useidx,spill);

  // return updated live range count
  return maxlrg;
}

//------------------------------clone_node----------------------------
// Clone node with anti dependence check.
Node* clone_node(Node* def, Block *b, Compile* C) {
  if (def->needs_anti_dependence_check()) {
#ifdef ASSERT
    if (Verbose) {
      tty->print_cr("RA attempts to clone node with anti_dependence:");
      def->dump(-1); tty->cr();
      tty->print_cr("into block:");
      b->dump();
    }
#endif
    if (C->subsume_loads() == true && !C->failing()) {
      // Retry with subsume_loads == false
      // If this is the first failure, the sentinel string will "stick"
      // to the Compile object, and the C2Compiler will see it and retry.
      C->record_failure(C2Compiler::retry_no_subsuming_loads());
    } else {
      // Bailout without retry
      C->record_method_not_compilable("RA Split failed: attempt to clone node with anti_dependence");
    }
    return 0;
  }
  return def->clone();
}
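// Why the anti-dependence bailout above matters: rematerialization works by
// cloning a def at its use sites.  A node that needs an anti-dependence
// check (a load) was pinned relative to possibly-aliasing stores during
// scheduling; a raw clone placed elsewhere could observe a different memory
// state, so we refuse to clone and instead retry the compile without
// subsumed loads (or bail out entirely).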
//------------------------------split_Rematerialize----------------------------
// Clone a local copy of the def.
Node *PhaseChaitin::split_Rematerialize( Node *def, Block *b, uint insidx, uint &maxlrg, GrowableArray<uint> splits, int slidx, uint *lrg2reach, Node **Reachblock, bool walkThru ) {
  // The input live ranges will be stretched to the site of the new
  // instruction.  They might be stretched past a def and will thus
  // have the old and new values of the same live range alive at the
  // same time - a definite no-no.  Split out private copies of
  // the inputs.
  if( def->req() > 1 ) {
    for( uint i = 1; i < def->req(); i++ ) {
      Node *in = def->in(i);
      // Check for single-def (LRG cannot be redefined)
      uint lidx = _lrg_map.live_range_id(in);
      if (lidx < _lrg_map.max_lrg_id() && lrgs(lidx).is_singledef()) {
        continue;
      }

      Block *b_def = _cfg.get_block_for_node(def);
      int idx_def = b_def->find_node(def);
      Node *in_spill = get_spillcopy_wide( in, def, i );
      if( !in_spill ) return 0; // Bailed out
      insert_proj(b_def,idx_def,in_spill,maxlrg++);
      if( b_def == b )
        insidx++;
      def->set_req(i,in_spill);
    }
  }

  Node *spill = clone_node(def, b, C);
  if (spill == NULL || C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {
    // Check when generating nodes
    return 0;
  }

  // See if any inputs are currently being spilled, and take the
  // latest copy of spilled inputs.
  if( spill->req() > 1 ) {
    for( uint i = 1; i < spill->req(); i++ ) {
      Node *in = spill->in(i);
      uint lidx = _lrg_map.find_id(in);

      // Walk backwards thru spill copy node intermediates
      if (walkThru) {
        while (in->is_SpillCopy() && lidx >= _lrg_map.max_lrg_id()) {
          in = in->in(1);
          lidx = _lrg_map.find_id(in);
        }

        if (lidx < _lrg_map.max_lrg_id() && lrgs(lidx).is_multidef()) {
          // walkThru found a multidef LRG, which is unsafe to use, so
          // just keep the original def used in the clone.
          in = spill->in(i);
          lidx = _lrg_map.find_id(in);
        }
      }

      if (lidx < _lrg_map.max_lrg_id() && lrgs(lidx).reg() >= LRG::SPILL_REG) {
        assert(Reachblock != NULL, "Reachblock must be non-NULL");
        Node *rdef = Reachblock[lrg2reach[lidx]];
        if (rdef) {
          spill->set_req(i, rdef);
        }
      }
    }
  }

  assert( spill->out_RegMask().is_UP(), "rematerialize to a reg" );
  // Rematerialized op is def->spilled+1
  set_was_spilled(spill);
  if( _spilled_once.test(def->_idx) )
    set_was_spilled(spill);

  insert_proj( b, insidx, spill, maxlrg++ );
#ifdef ASSERT
  // Increment the counter for this lrg
  splits.at_put(slidx, splits.at(slidx)+1);
#endif
  // See if the cloned def kills any flags, and copy those kills as well
  uint i = insidx+1;
  int found_projs = clone_projs( b, i, def, spill, maxlrg);
  if (found_projs > 0) {
    // Adjust the point where we go hi-pressure
    if (i <= b->_ihrp_index) {
      b->_ihrp_index += found_projs;
    }
    if (i <= b->_fhrp_index) {
      b->_fhrp_index += found_projs;
    }
  }

  return spill;
}
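// A worked instance of the heuristic below: a live range for a double
// (num_regs == 2) whose allowed-register mask has 8 registers gets an
// effective pressure limit of 8 >> (2-1) == 4, so any block whose recorded
// pressure reached 4 or more counts as high pressure for it.  A bound live
// range (mask no bigger than the value itself) falls back to the
// INTPRESSURE/FLOATPRESSURE limit instead.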
//------------------------------is_high_pressure-------------------------------
// Function to compute whether or not this live range is "high pressure"
// in this block - whether it spills eagerly or not.
bool PhaseChaitin::is_high_pressure( Block *b, LRG *lrg, uint insidx ) {
  if( lrg->_was_spilled1 ) return true;
  // Forced spilling due to conflict?  Then split only at binding uses
  // or defs, not for supposed capacity problems.
  // CNC - Turned off 7/8/99, causes too much spilling
  // if( lrg->_is_bound ) return false;

  // Use float pressure numbers for vectors.
  bool is_float_or_vector = lrg->_is_float || lrg->_is_vector;
  // Not yet reached the high-pressure cutoff point, so low pressure
  uint hrp_idx = is_float_or_vector ? b->_fhrp_index : b->_ihrp_index;
  if( insidx < hrp_idx ) return false;
  // Register pressure for the block as a whole depends on reg class
  int block_pres = is_float_or_vector ? b->_freg_pressure : b->_reg_pressure;
  // Bound live ranges will split at the binding points first;
  // Intermediate splits should assume the live range's register set
  // got "freed up" and that num_regs will become INT_PRESSURE.
  int bound_pres = is_float_or_vector ? FLOATPRESSURE : INTPRESSURE;
  // Effective register pressure limit.
  int lrg_pres = (lrg->get_invalid_mask_size() > lrg->num_regs())
    ? (lrg->get_invalid_mask_size() >> (lrg->num_regs()-1)) : bound_pres;
  // High pressure if block pressure requires more register freedom
  // than live range has.
  return block_pres >= lrg_pres;
}


//------------------------------prompt_use---------------------------------
// True if lidx is used before any real register is def'd in the block
bool PhaseChaitin::prompt_use( Block *b, uint lidx ) {
  if (lrgs(lidx)._was_spilled2) {
    return false;
  }

  // Scan block for 1st use.
  for( uint i = 1; i <= b->end_idx(); i++ ) {
    Node *n = b->get_node(i);
    // Ignore PHI use, these can be up or down
    if (n->is_Phi()) {
      continue;
    }
    for (uint j = 1; j < n->req(); j++) {
      if (_lrg_map.find_id(n->in(j)) == lidx) {
        return true;            // Found 1st use!
      }
    }
    if (n->out_RegMask().is_NotEmpty()) {
      return false;
    }
  }
  return false;
}
//------------------------------Split--------------------------------------
//----------Split Routine----------
// ***** NEW SPLITTING HEURISTIC *****
// DEFS: If the DEF is in a High Register Pressure(HRP) Block, split there.
//       Else, no split unless there is a HRP block between a DEF and
//       one of its uses, and then split at the HRP block.
//
// USES: If USE is in HRP, split at use to leave main LRG on stack.
//       Else, hoist LRG back up to register only (ie - split is also DEF)
// We will compute a new maxlrg as we go
uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
  NOT_PRODUCT( Compile::TracePhase t3("regAllocSplit", &_t_regAllocSplit, TimeCompiler); )

  // Free thread local resources used by this method on exit.
  ResourceMark rm(split_arena);

  uint                 bidx, pidx, slidx, insidx, inpidx, twoidx;
  uint                 non_phi = 1, spill_cnt = 0;
  Node               **Reachblock;
  Node                *n1, *n2, *n3;
  Node_List           *defs,*phis;
  bool                *UPblock;
  bool                 u1, u2, u3;
  Block               *b, *pred;
  PhiNode             *phi;
  GrowableArray<uint>  lidxs(split_arena, maxlrg, 0, 0);

  // Array of counters to count splits per live range
  GrowableArray<uint> splits(split_arena, maxlrg, 0, 0);

#define NEW_SPLIT_ARRAY(type, size)\
  (type*) split_arena->allocate_bytes((size) * sizeof(type))

  //----------Setup Code----------
  // Create a convenient mapping from lrg numbers to reaches/leaves indices
  uint *lrg2reach = NEW_SPLIT_ARRAY(uint, maxlrg);
  // Keep track of DEFS & Phis for later passes
  defs = new Node_List();
  phis = new Node_List();
  // Gather info on which LRG's are spilling, and build maps
  for (bidx = 1; bidx < maxlrg; bidx++) {
    if (lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG) {
      assert(!lrgs(bidx).mask().is_AllStack(),"AllStack should color");
      lrg2reach[bidx] = spill_cnt;
      spill_cnt++;
      lidxs.append(bidx);
#ifdef ASSERT
      // Initialize the split counts to zero
      splits.append(0);
#endif
#ifndef PRODUCT
      if( PrintOpto && WizardMode && lrgs(bidx)._was_spilled1 )
        tty->print_cr("Warning, 2nd spill of L%d",bidx);
#endif
    }
  }
  // Create side arrays for propagating reaching defs info.
  // Each block needs a node pointer for each spilling live range for the
  // Def which is live into the block.  Phi nodes handle multiple input
  // Defs by querying the output of their predecessor blocks and resolving
  // them to a single Def at the phi.  The pointer is updated for each
  // Def in the block, and then becomes the output for the block when
  // processing of the block is complete.  We also need to track whether
  // a Def is UP or DOWN.  UP means that it should get a register (ie -
  // it is always in LRP regions), and DOWN means that it is probably
  // on the stack (ie - it crosses HRP regions).
  Node ***Reaches     = NEW_SPLIT_ARRAY( Node**, _cfg.number_of_blocks() + 1);
  bool  **UP          = NEW_SPLIT_ARRAY( bool*, _cfg.number_of_blocks() + 1);
  Node  **debug_defs  = NEW_SPLIT_ARRAY( Node*, spill_cnt );
  VectorSet **UP_entry= NEW_SPLIT_ARRAY( VectorSet*, spill_cnt );

  // Initialize Reaches & UP
  for (bidx = 0; bidx < _cfg.number_of_blocks() + 1; bidx++) {
    Reaches[bidx]     = NEW_SPLIT_ARRAY( Node*, spill_cnt );
    UP[bidx]          = NEW_SPLIT_ARRAY( bool, spill_cnt );
    Node **Reachblock = Reaches[bidx];
    bool  *UPblock    = UP[bidx];
    for( slidx = 0; slidx < spill_cnt; slidx++ ) {
      UPblock[slidx] = true;     // Assume they start in registers
      Reachblock[slidx] = NULL;  // Assume that no def is present
    }
  }

#undef NEW_SPLIT_ARRAY

  // Initialize to array of empty vectorsets
  for( slidx = 0; slidx < spill_cnt; slidx++ )
    UP_entry[slidx] = new VectorSet(split_arena);
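  // The splitting work below is organized as three passes:
  //   PASS 1 walks the blocks in RPO, propagating reaching defs and
  //          inserting SpillCopy and Phi nodes wherever splits are needed.
  //   PASS 2 hands out fresh live range numbers to the recorded DEFs and
  //          patches/splits the Phis created in PASS 1.
  //   PASS 3 unions each Phi with its inputs (and two-address inputs with
  //          their outputs) so the pieces share a single live range again.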
  //----------PASS 1----------
  //----------Propagation & Node Insertion Code----------
  // Walk the Blocks in RPO for DEF & USE info
  for( bidx = 0; bidx < _cfg.number_of_blocks(); bidx++ ) {

    if (C->check_node_count(spill_cnt, out_of_nodes)) {
      return 0;
    }

    b = _cfg.get_block(bidx);
    // Reaches & UP arrays for this block
    Reachblock = Reaches[b->_pre_order];
    UPblock    = UP[b->_pre_order];
    // Reset counter of start of non-Phi nodes in block
    non_phi = 1;
    //----------Block Entry Handling----------
    // Check for need to insert a new phi
    // Cycle through this block's predecessors, collecting Reaches
    // info for each spilled LRG.  If they are identical, no phi is
    // needed.  If they differ, check for a phi, and insert if missing,
    // or update edges if present.  Set current block's Reaches set to
    // be either the phi's or the reaching def, as appropriate.
    // If no Phi is needed, check if the LRG needs to spill on entry
    // to the block due to HRP.
    for( slidx = 0; slidx < spill_cnt; slidx++ ) {
      // Grab the live range number
      uint lidx = lidxs.at(slidx);
      // Do not bother splitting or putting in Phis for single-def
      // rematerialized live ranges.  This happens a lot to constants
      // with long live ranges.
      if( lrgs(lidx).is_singledef() &&
          lrgs(lidx)._def->rematerialize() ) {
        // reset the Reaches & UP entries
        Reachblock[slidx] = lrgs(lidx)._def;
        UPblock[slidx] = true;
        // Record following instruction in case 'n' rematerializes and
        // kills flags
        Block *pred1 = _cfg.get_block_for_node(b->pred(1));
        continue;
      }

      // Initialize needs_phi and needs_split
      bool needs_phi = false;
      bool needs_split = false;
      bool has_phi = false;
      // Walk the predecessor blocks to check inputs for that live range
      // Grab predecessor block header
      n1 = b->pred(1);
      // Grab the appropriate reaching def info for inpidx
      pred = _cfg.get_block_for_node(n1);
      pidx = pred->_pre_order;
      Node **Ltmp = Reaches[pidx];
      bool  *Utmp = UP[pidx];
      n1 = Ltmp[slidx];
      u1 = Utmp[slidx];
      // Initialize node for saving type info
      n3 = n1;
      u3 = u1;

      // Compare inputs to see if a Phi is needed
      for( inpidx = 2; inpidx < b->num_preds(); inpidx++ ) {
        // Grab predecessor block headers
        n2 = b->pred(inpidx);
        // Grab the appropriate reaching def info for inpidx
        pred = _cfg.get_block_for_node(n2);
        pidx = pred->_pre_order;
        Ltmp = Reaches[pidx];
        Utmp = UP[pidx];
        n2 = Ltmp[slidx];
        u2 = Utmp[slidx];
        // For each LRG, decide if a phi is necessary
        if( n1 != n2 ) {
          needs_phi = true;
        }
        // See if the phi has mismatched inputs, UP vs. DOWN
        if( n1 && n2 && (u1 != u2) ) {
          needs_split = true;
        }
        // Move n2/u2 to n1/u1 for next iteration
        n1 = n2;
        u1 = u2;
        // Preserve a non-NULL predecessor for later type referencing
        if( (n3 == NULL) && (n2 != NULL) ){
          n3 = n2;
          u3 = u2;
        }
      }  // End for all potential Phi inputs
      // check block for appropriate phinode & update edges
      for( insidx = 1; insidx <= b->end_idx(); insidx++ ) {
        n1 = b->get_node(insidx);
        // bail if this is not a phi
        phi = n1->is_Phi() ? n1->as_Phi() : NULL;
        if( phi == NULL ) {
          // Keep track of index of first non-PhiNode instruction in block
          non_phi = insidx;
          // break out of the for loop as we have handled all phi nodes
          break;
        }
        // must be looking at a phi
        if (_lrg_map.find_id(n1) == lidxs.at(slidx)) {
          // found the necessary phi
          needs_phi = false;
          has_phi = true;
          // initialize the Reaches entry for this LRG
          Reachblock[slidx] = phi;
          break;
        }  // end if found correct phi
      }  // end for all phi's

      // If a phi is needed or exists, check for it
      if( needs_phi || has_phi ) {
        // add new phinode if one not already found
        if( needs_phi ) {
          // create a new phi node and insert it into the block
          // type is taken from left over pointer to a predecessor
          assert(n3,"No non-NULL reaching DEF for a Phi");
          phi = new (C) PhiNode(b->head(), n3->bottom_type());
          // initialize the Reaches entry for this LRG
          Reachblock[slidx] = phi;

          // add node to block & node_to_block mapping
          insert_proj(b, insidx++, phi, maxlrg++);
          non_phi++;
          // Reset new phi's mapping to be the spilling live range
          _lrg_map.map(phi->_idx, lidx);
          assert(_lrg_map.find_id(phi) == lidx, "Bad update on Union-Find mapping");
        }  // end if not found correct phi
        // Here you have either found or created the Phi, so record it
        assert(phi != NULL,"Must have a Phi Node here");
        phis->push(phi);
        // PhiNodes should either force the LRG UP or DOWN depending
        // on its inputs and the register pressure in the Phi's block.
        UPblock[slidx] = true;  // Assume new DEF is UP
        // If entering a high-pressure area with no immediate use,
        // assume Phi is DOWN
        if( is_high_pressure( b, &lrgs(lidx), b->end_idx()) && !prompt_use(b,lidx) )
          UPblock[slidx] = false;
        // If we are not split up/down and all inputs are down, then we
        // are down
        if( !needs_split && !u3 )
          UPblock[slidx] = false;
      }  // end if phi is needed

      // Do not need a phi, so grab the reaching DEF
      else {
        // Grab predecessor block header
        n1 = b->pred(1);
        // Grab the appropriate reaching def info for k
        pred = _cfg.get_block_for_node(n1);
        pidx = pred->_pre_order;
        Node **Ltmp = Reaches[pidx];
        bool  *Utmp = UP[pidx];
        // reset the Reaches & UP entries
        Reachblock[slidx] = Ltmp[slidx];
        UPblock[slidx] = Utmp[slidx];
      }  // end else no Phi is needed
    }  // end for all spilling live ranges
    // DEBUG
#ifndef PRODUCT
    if(trace_spilling()) {
      tty->print("\nBlock %d: ", b->_pre_order);
      tty->print("Reaching Definitions after Phi handling\n");
      for( uint x = 0; x < spill_cnt; x++ ) {
        tty->print("Spill Idx %d: UP %d: Node\n",x,UPblock[x]);
        if( Reachblock[x] )
          Reachblock[x]->dump();
        else
          tty->print("Undefined\n");
      }
    }
#endif

    //----------Non-Phi Node Splitting----------
    // Since phi-nodes have now been handled, the Reachblock array for this
    // block is initialized with the correct starting value for the defs which
    // reach non-phi instructions in this block.  Thus, process non-phi
    // instructions normally, inserting SpillCopy nodes for all spill
    // locations.

    // Memoize any DOWN reaching definitions for use as DEBUG info
    for( insidx = 0; insidx < spill_cnt; insidx++ ) {
      debug_defs[insidx] = (UPblock[insidx]) ? NULL : Reachblock[insidx];
      if( UPblock[insidx] )     // Memoize UP decision at block start
        UP_entry[insidx]->set( b->_pre_order );
    }
    //----------Walk Instructions in the Block and Split----------
    // For all non-phi instructions in the block
    for( insidx = 1; insidx <= b->end_idx(); insidx++ ) {
      Node *n = b->get_node(insidx);
      // Find the defining Node's live range index
      uint defidx = _lrg_map.find_id(n);
      uint cnt = n->req();

      if (n->is_Phi()) {
        // Skip phi nodes after removing dead copies.
        if (defidx < _lrg_map.max_lrg_id()) {
          // Check for useless Phis.  These appear if we spill, then
          // coalesce away copies.  Don't touch Phis in spilling live
          // ranges; they are busy getting modified in this pass.
          if( lrgs(defidx).reg() < LRG::SPILL_REG ) {
            uint i;
            Node *u = NULL;
            // Look for the Phi merging 2 unique inputs
            for( i = 1; i < cnt; i++ ) {
              // Ignore repeats and self
              if( n->in(i) != u && n->in(i) != n ) {
                // Found a unique input
                if( u != NULL ) // If it's the 2nd, bail out
                  break;
                u = n->in(i);   // Else record it
              }
            }
            assert( u, "at least 1 valid input expected" );
            if (i >= cnt) {     // Found one unique input
              assert(_lrg_map.find_id(n) == _lrg_map.find_id(u), "should be the same lrg");
              n->replace_by(u); // Then replace with unique input
              n->disconnect_inputs(NULL, C);
              b->remove_node(insidx);
              insidx--;
              b->_ihrp_index--;
              b->_fhrp_index--;
            }
          }
        }
        continue;
      }
      assert( insidx > b->_ihrp_index ||
              (b->_reg_pressure < (uint)INTPRESSURE) ||
              b->_ihrp_index > 4000000 ||
              b->_ihrp_index >= b->end_idx() ||
              !b->get_node(b->_ihrp_index)->is_Proj(), "" );
      assert( insidx > b->_fhrp_index ||
              (b->_freg_pressure < (uint)FLOATPRESSURE) ||
              b->_fhrp_index > 4000000 ||
              b->_fhrp_index >= b->end_idx() ||
              !b->get_node(b->_fhrp_index)->is_Proj(), "" );
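      // On reaching the block's recorded high-pressure index, every
      // spilling live range that is still UP (in a register) and cannot be
      // rematerialized gets split DOWN here, so it sits in a stack slot
      // across the high-pressure region instead of competing for registers
      // inside it.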
      // ********** Handle Crossing HRP Boundary **********
      if( (insidx == b->_ihrp_index) || (insidx == b->_fhrp_index) ) {
        for( slidx = 0; slidx < spill_cnt; slidx++ ) {
          // Check for need to split at HRP boundary - split if UP
          n1 = Reachblock[slidx];
          // bail out if no reaching DEF
          if( n1 == NULL ) continue;
          // bail out if live range is 'isolated' around inner loop
          uint lidx = lidxs.at(slidx);
          // If live range is currently UP
          if( UPblock[slidx] ) {
            // set location to insert spills at
            // SPLIT DOWN HERE - NO CISC SPILL
            if( is_high_pressure( b, &lrgs(lidx), insidx ) &&
                !n1->rematerialize() ) {
              // If there is already a valid stack definition available, use it
              if( debug_defs[slidx] != NULL ) {
                Reachblock[slidx] = debug_defs[slidx];
              }
              else {
                // Insert point is just past last use or def in the block
                int insert_point = insidx-1;
                while( insert_point > 0 ) {
                  Node *n = b->get_node(insert_point);
                  // Hit top of block?  Quit going backwards
                  if (n->is_Phi()) {
                    break;
                  }
                  // Found a def?  Better split after it.
                  if (_lrg_map.live_range_id(n) == lidx) {
                    break;
                  }
                  // Look for a use
                  uint i;
                  for( i = 1; i < n->req(); i++ ) {
                    if (_lrg_map.live_range_id(n->in(i)) == lidx) {
                      break;
                    }
                  }
                  // Found a use?  Better split after it.
                  if (i < n->req()) {
                    break;
                  }
                  insert_point--;
                }
                uint orig_eidx = b->end_idx();
                maxlrg = split_DEF( n1, b, insert_point, maxlrg, Reachblock, debug_defs, splits, slidx);
                // If it wasn't split bail
                if (!maxlrg) {
                  return 0;
                }
                // Spill of NULL check mem op goes into the following block.
                if (b->end_idx() > orig_eidx) {
                  insidx++;
                }
              }
              // This is a new DEF, so update UP
              UPblock[slidx] = false;
#ifndef PRODUCT
              // DEBUG
              if( trace_spilling() ) {
                tty->print("\nNew Split DOWN DEF of Spill Idx ");
                tty->print("%d, UP %d:\n",slidx,false);
                n1->dump();
              }
#endif
            }
          }  // end if LRG is UP
        }  // end for all spilling live ranges
        assert( b->get_node(insidx) == n, "got insidx set incorrectly" );
      }  // end if crossing HRP Boundary

      // If the LRG index is oob, then this is a new spillcopy, skip it.
      if (defidx >= _lrg_map.max_lrg_id()) {
        continue;
      }
      LRG &deflrg = lrgs(defidx);
      uint copyidx = n->is_Copy();
      // Remove coalesced copy from CFG
      if (copyidx && defidx == _lrg_map.live_range_id(n->in(copyidx))) {
        n->replace_by( n->in(copyidx) );
        n->set_req( copyidx, NULL );
        b->remove_node(insidx--);
        b->_ihrp_index--;       // Adjust the point where we go hi-pressure
        b->_fhrp_index--;
        continue;
      }

#define DERIVED 0

      // ********** Handle USES **********
      bool nullcheck = false;
      // Implicit null checks never use the spilled value
      if( n->is_MachNullCheck() )
        nullcheck = true;
      if( !nullcheck ) {
        // Search all inputs for a Spill-USE
        JVMState* jvms = n->jvms();
        uint oopoff = jvms ? jvms->oopoff() : cnt;
        uint old_last = cnt - 1;
        for( inpidx = 1; inpidx < cnt; inpidx++ ) {
          // Derived/base pairs may be added to our inputs during this loop.
          // If inpidx > old_last, then one of these new inputs is being
          // handled.  Skip the derived part of the pair, but process
          // the base like any other input.
          if (inpidx > old_last && ((inpidx - oopoff) & 1) == DERIVED) {
            continue;  // skip derived_debug added below
          }
          // Get lidx of input
          uint useidx = _lrg_map.find_id(n->in(inpidx));
          // Not a brand-new split, and it is a spill use
          if (useidx < _lrg_map.max_lrg_id() && lrgs(useidx).reg() >= LRG::SPILL_REG) {
            // Check for valid reaching DEF
            slidx = lrg2reach[useidx];
            Node *def = Reachblock[slidx];
            assert( def != NULL, "Using Undefined Value in Split()\n");

            // (+++) %%%% remove this in favor of pre-pass in matcher.cpp
            // monitor references do not care where they live, so just hook
            if ( jvms && jvms->is_monitor_use(inpidx) ) {
              // The effect of this clone is to drop the node out of the block,
              // so that the allocator does not see it anymore, and therefore
              // does not attempt to assign it a register.
              def = clone_node(def, b, C);
              if (def == NULL || C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {
                return 0;
              }
              _lrg_map.extend(def->_idx, 0);
              _cfg.map_node_to_block(def, b);
              n->set_req(inpidx, def);
              continue;
            }

            // Rematerializable?  Then clone def at use site instead
            // of store/load
            if( def->rematerialize() ) {
              int old_size = b->number_of_nodes();
              def = split_Rematerialize( def, b, insidx, maxlrg, splits, slidx, lrg2reach, Reachblock, true );
              if( !def ) return 0;    // Bail out
              insidx += b->number_of_nodes()-old_size;
            }
            MachNode *mach = n->is_Mach() ? n->as_Mach() : NULL;
            // Base pointers and oopmap references do not care where they live.
            if ((inpidx >= oopoff) ||
                (mach && mach->ideal_Opcode() == Op_AddP && inpidx == AddPNode::Base)) {
              if (def->rematerialize() && lrgs(useidx)._was_spilled2) {
                // This def has been rematerialized a couple of times without
                // progress.  It doesn't care if it lives UP or DOWN, so
                // spill it down now.
                maxlrg = split_USE(def,b,n,inpidx,maxlrg,false,false,splits,slidx);
                // If it wasn't split bail
                if (!maxlrg) {
                  return 0;
                }
                insidx++;  // Reset iterator to skip USE side split
              } else {
                // Just hook the def edge
                n->set_req(inpidx, def);
              }

              if (inpidx >= oopoff) {
                // After oopoff, we have derived/base pairs.  We must mention all
                // derived pointers here as derived/base pairs for GC.  If the
                // derived value is spilling and we have a copy both in Reachblock
                // (called here 'def') and debug_defs[slidx] we need to mention
                // both in derived/base pairs or kill one.
                Node *derived_debug = debug_defs[slidx];
                if( ((inpidx - oopoff) & 1) == DERIVED && // derived vs base?
                    mach && mach->ideal_Opcode() != Op_Halt &&
                    derived_debug != NULL &&
                    derived_debug != def ) { // Actual 2nd value appears
                  // We have already set 'def' as a derived value.
                  // Also set debug_defs[slidx] as a derived value.
                  uint k;
                  for( k = oopoff; k < cnt; k += 2 )
                    if( n->in(k) == derived_debug )
                      break;      // Found an instance of debug derived
                  if( k == cnt ) {// No instance of debug_defs[slidx]
                    // Add a derived/base pair to cover the debug info.
                    // We have to process the added base later since it is not
                    // handled yet at this point but skip derived part.
                    assert(((n->req() - oopoff) & 1) == DERIVED,
                           "must match skip condition above");
                    n->add_req( derived_debug );   // this will be skipped above
                    n->add_req( n->in(inpidx+1) ); // this will be processed
                    // Increment cnt to handle added input edges on
                    // subsequent iterations.
                    cnt += 2;
                  }
                }
              }
              continue;
            }
            // Special logic for DEBUG info
            if( jvms && b->_freq > BLOCK_FREQUENCY(0.5) ) {
              uint debug_start = jvms->debug_start();
              // If this is debug info use & there is a reaching DOWN def
              if ((debug_start <= inpidx) && (debug_defs[slidx] != NULL)) {
                assert(inpidx < oopoff, "handle only debug info here");
                // Just hook it in & move on
                n->set_req(inpidx, debug_defs[slidx]);
                // (Note that this can make two sides of a split live at the
                // same time: The debug def on stack, and another def in a
                // register.  The GC needs to know about both of them, but any
                // derived pointers after oopoff will refer to only one of the
                // two defs and the GC would therefore miss the other.  Thus
                // this hack is only allowed for debug info which is Java state
                // and therefore never a derived pointer.)
                continue;
              }
            }
            // Grab register mask info
            const RegMask &dmask = def->out_RegMask();
            const RegMask &umask = n->in_RegMask(inpidx);
            bool is_vect = RegMask::is_vector(def->ideal_reg());
            assert(inpidx < oopoff, "cannot use-split oop map info");

            bool dup = UPblock[slidx];
            bool uup = umask.is_UP();
            // Need special logic to handle bound USES.  Insert a split at this
            // bound use if we can't rematerialize the def, or if we need the
            // split to form a misaligned pair.
            if( !umask.is_AllStack() &&
                (int)umask.Size() <= lrgs(useidx).num_regs() &&
                (!def->rematerialize() ||
                 !is_vect && umask.is_misaligned_pair())) {
              // These need a Split regardless of overlap or pressure
              // SPLIT - NO DEF - NO CISC SPILL
              maxlrg = split_USE(def,b,n,inpidx,maxlrg,dup,false, splits,slidx);
              // If it wasn't split bail
              if (!maxlrg) {
                return 0;
              }
              insidx++;  // Reset iterator to skip USE side split
              continue;
            }

            if (UseFPUForSpilling && n->is_MachCall() && !uup && !dup ) {
              // The use at the call can force the def down so insert
              // a split before the use to allow the def more freedom.
              maxlrg = split_USE(def,b,n,inpidx,maxlrg,dup,false, splits,slidx);
              // If it wasn't split bail
              if (!maxlrg) {
                return 0;
              }
              insidx++;  // Reset iterator to skip USE side split
              continue;
            }

            // Here is the logic chart which describes USE Splitting:
            // 0 = false or DOWN, 1 = true or UP
            //
            // Overlap | DEF | USE | Action
            //-------------------------------------------------------
            //    0    |  0  |  0  | Copy - mem -> mem
            //    0    |  0  |  1  | Split-UP - Check HRP
            //    0    |  1  |  0  | Split-DOWN - Debug Info?
            //    0    |  1  |  1  | Copy - reg -> reg
            //    1    |  0  |  0  | Reset Input Edge (no Split)
            //    1    |  0  |  1  | Split-UP - Check HRP
            //    1    |  1  |  0  | Split-DOWN - Debug Info?
            //    1    |  1  |  1  | Reset Input Edge (no Split)
            //
            // So, if (dup == uup), then overlap test determines action,
            // with true being no split, and false being copy.  Else,
            // if DEF is DOWN, Split-UP, and check HRP to decide on
            // resetting DEF.  Finally if DEF is UP, Split-DOWN, with
            // special handling for Debug Info.
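            // Tracing one row of the chart: Overlap 0, DEF UP, USE DOWN
            // means the value sits in a register but this use wants it on
            // the stack, so we Split-DOWN with a copy before the use; if
            // the use is debug info, the new stack copy is also remembered
            // in debug_defs[] for later debug uses of the same value.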
            if( dup == uup ) {
              if( dmask.overlap(umask) ) {
                // Both are either up or down, and there is overlap, No Split
                n->set_req(inpidx, def);
              }
              else {  // Both are either up or down, and there is no overlap
                if( dup ) {  // If UP, reg->reg copy
                  // COPY ACROSS HERE - NO DEF - NO CISC SPILL
                  maxlrg = split_USE(def,b,n,inpidx,maxlrg,false,false, splits,slidx);
                  // If it wasn't split bail
                  if (!maxlrg) {
                    return 0;
                  }
                  insidx++;  // Reset iterator to skip USE side split
                }
                else {       // DOWN, mem->mem copy
                  // COPY UP & DOWN HERE - NO DEF - NO CISC SPILL
                  // First Split-UP to move value into Register
                  uint def_ideal = def->ideal_reg();
                  const RegMask* tmp_rm = Matcher::idealreg2regmask[def_ideal];
                  Node *spill = new (C) MachSpillCopyNode(def, dmask, *tmp_rm);
                  insert_proj( b, insidx, spill, maxlrg );
                  // Then Split-DOWN as if previous Split was DEF
                  maxlrg = split_USE(spill,b,n,inpidx,maxlrg,false,false, splits,slidx);
                  // If it wasn't split bail
                  if (!maxlrg) {
                    return 0;
                  }
                  insidx += 2;  // Reset iterator to skip USE side splits
                }
              }  // End else no overlap
            }  // End if dup == uup
            // dup != uup, so check dup for direction of Split
            else {
              if( dup ) {  // If UP, Split-DOWN and check Debug Info
                // If this node is already a SpillCopy, just patch the edge,
                // except in the case of spilling to the stack.
                if( n->is_SpillCopy() ) {
                  RegMask tmp_rm(umask);
                  tmp_rm.SUBTRACT(Matcher::STACK_ONLY_mask);
                  if( dmask.overlap(tmp_rm) ) {
                    if( def != n->in(inpidx) ) {
                      n->set_req(inpidx, def);
                    }
                    continue;
                  }
                }
                // COPY DOWN HERE - NO DEF - NO CISC SPILL
                maxlrg = split_USE(def,b,n,inpidx,maxlrg,false,false, splits,slidx);
                // If it wasn't split bail
                if (!maxlrg) {
                  return 0;
                }
                insidx++;  // Reset iterator to skip USE side split
                // Check for debug-info split.  Capture it for later
                // debug splits of the same value
                if (jvms && jvms->debug_start() <= inpidx && inpidx < oopoff)
                  debug_defs[slidx] = n->in(inpidx);

              }
              else {       // DOWN, Split-UP and check register pressure
                if( is_high_pressure( b, &lrgs(useidx), insidx ) ) {
                  // COPY UP HERE - NO DEF - CISC SPILL
                  maxlrg = split_USE(def,b,n,inpidx,maxlrg,true,true, splits,slidx);
                  // If it wasn't split bail
                  if (!maxlrg) {
                    return 0;
                  }
                  insidx++;  // Reset iterator to skip USE side split
                } else {     // LRP
                  // COPY UP HERE - WITH DEF - NO CISC SPILL
                  maxlrg = split_USE(def,b,n,inpidx,maxlrg,true,false, splits,slidx);
                  // If it wasn't split bail
                  if (!maxlrg) {
                    return 0;
                  }
                  // Flag this lift-up in a low-pressure block as
                  // already-spilled, so if it spills again it will
                  // spill hard (instead of not spilling hard and
                  // coalescing away).
                  set_was_spilled(n->in(inpidx));
                  // Since this is a new DEF, update Reachblock & UP
                  Reachblock[slidx] = n->in(inpidx);
                  UPblock[slidx] = true;
                  insidx++;  // Reset iterator to skip USE side split
                }
              }  // End else DOWN
            }  // End dup != uup
          }  // End if Spill USE
        }  // End For All Inputs
      }  // End If not nullcheck
      // ********** Handle DEFS **********
      // DEFS either Split DOWN in HRP regions or when the LRG is bound, or
      // just reset the Reaches info in LRP regions.  DEFS must always update
      // UP info.
      if( deflrg.reg() >= LRG::SPILL_REG ) {    // Spilled?
        uint slidx = lrg2reach[defidx];
        // Add to defs list for later assignment of new live range number
        defs->push(n);
        // Set a flag on the Node indicating it has already spilled.
        // Only do it for capacity spills not conflict spills.
        if( !deflrg._direct_conflict )
          set_was_spilled(n);
        assert(!n->is_Phi(),"Cannot insert Phi into DEFS list");
        // Grab UP info for DEF
        const RegMask &dmask = n->out_RegMask();
        bool defup = dmask.is_UP();
        int ireg = n->ideal_reg();
        bool is_vect = RegMask::is_vector(ireg);
        // Only split at Def if this is a HRP block or bound (and spilled once)
        if( !n->rematerialize() &&
            (((dmask.is_bound(ireg) || !is_vect && dmask.is_misaligned_pair()) &&
              (deflrg._direct_conflict || deflrg._must_spill)) ||
             // Check for LRG being up in a register and we are inside a high
             // pressure area.  Spill it down immediately.
             (defup && is_high_pressure(b,&deflrg,insidx))) ) {
          assert( !n->rematerialize(), "" );
          assert( !n->is_SpillCopy(), "" );
          // Do a split at the def site.
          maxlrg = split_DEF( n, b, insidx, maxlrg, Reachblock, debug_defs, splits, slidx );
          // If it wasn't split bail
          if (!maxlrg) {
            return 0;
          }
          // Split DEF's Down
          UPblock[slidx] = 0;
#ifndef PRODUCT
          // DEBUG
          if( trace_spilling() ) {
            tty->print("\nNew Split DOWN DEF of Spill Idx ");
            tty->print("%d, UP %d:\n",slidx,false);
            n->dump();
          }
#endif
        }
        else {                  // Neither bound nor HRP, must be LRP
          // otherwise, just record the def
          Reachblock[slidx] = n;
          // UP should come from the outRegmask() of the DEF
          UPblock[slidx] = defup;
          // Update debug list of reaching down definitions, kill if DEF is UP
          debug_defs[slidx] = defup ? NULL : n;
#ifndef PRODUCT
          // DEBUG
          if( trace_spilling() ) {
            tty->print("\nNew DEF of Spill Idx ");
            tty->print("%d, UP %d:\n",slidx,defup);
            n->dump();
          }
#endif
        }  // End else LRP
      }  // End if spill def

      // ********** Split Left Over Mem-Mem Moves **********
      // Check for mem-mem copies and split them now.  Do not do this
      // to copies about to be spilled; they will be Split shortly.
      if (copyidx) {
        Node *use = n->in(copyidx);
        uint useidx = _lrg_map.find_id(use);
        if (useidx < _lrg_map.max_lrg_id() &&       // This is not a new split
            OptoReg::is_stack(deflrg.reg()) &&
            deflrg.reg() < LRG::SPILL_REG ) {       // And DEF is from stack
          LRG &uselrg = lrgs(useidx);
          if( OptoReg::is_stack(uselrg.reg()) &&
              uselrg.reg() < LRG::SPILL_REG &&      // USE is from stack
              deflrg.reg() != uselrg.reg() ) {      // Not trivially removed
            uint def_ideal_reg = n->bottom_type()->ideal_reg();
            const RegMask &def_rm = *Matcher::idealreg2regmask[def_ideal_reg];
            const RegMask &use_rm = n->in_RegMask(copyidx);
            if( def_rm.overlap(use_rm) && n->is_SpillCopy() ) {  // Bug 4707800, 'n' may be a storeSSL
              if (C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {  // Check when generating nodes
                return 0;
              }
              Node *spill = new (C) MachSpillCopyNode(use,use_rm,def_rm);
              n->set_req(copyidx,spill);
              n->as_MachSpillCopy()->set_in_RegMask(def_rm);
              // Put the spill just before the copy
              insert_proj( b, insidx++, spill, maxlrg++ );
            }
          }
        }
      }
    }  // End For All Instructions in Block - Non-PHI Pass
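    // Before leaving the block, forget reaching defs for anything not live
    // out; otherwise a dead def would look like it reaches the successor
    // blocks and PASS 1 would stretch its live range past the last real use.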
    // Check if each LRG is live out of this block so as not to propagate
    // beyond the last use of a LRG.
    for( slidx = 0; slidx < spill_cnt; slidx++ ) {
      uint defidx = lidxs.at(slidx);
      IndexSet *liveout = _live->live(b);
      if( !liveout->member(defidx) ) {
#ifdef ASSERT
        // The index defidx is not live.  Check the liveout array to ensure that
        // it contains no members which compress to defidx.  Finding such an
        // instance may be a case to add liveout adjustment in compress_uf_map().
        // See 5063219.
        uint member;
        IndexSetIterator isi(liveout);
        while ((member = isi.next()) != 0) {
          assert(defidx != _lrg_map.find_const(member), "Live out member has not been compressed");
        }
#endif
        Reachblock[slidx] = NULL;
      } else {
        assert(Reachblock[slidx] != NULL,"No reaching definition for liveout value");
      }
    }
#ifndef PRODUCT
    if( trace_spilling() )
      b->dump();
#endif
  }  // End For All Blocks

  //----------PASS 2----------
  // Reset all DEF live range numbers here
  for( insidx = 0; insidx < defs->size(); insidx++ ) {
    // Grab the def
    n1 = defs->at(insidx);
    // Set new lidx for DEF
    new_lrg(n1, maxlrg++);
  }
  //----------Phi Node Splitting----------
  // Clean up a phi here, and assign a new live range number
  // Cycle through this block's predecessors, collecting Reaches
  // info for each spilled LRG and update edges.
  // Walk the phis list to patch inputs, split phis, and name phis
  uint lrgs_before_phi_split = maxlrg;
  for( insidx = 0; insidx < phis->size(); insidx++ ) {
    Node *phi = phis->at(insidx);
    assert(phi->is_Phi(),"This list must only contain Phi Nodes");
    Block *b = _cfg.get_block_for_node(phi);
    // Grab the live range number
    uint lidx = _lrg_map.find_id(phi);
    uint slidx = lrg2reach[lidx];
    // Update node to lidx map
    new_lrg(phi, maxlrg++);
    // Get PASS1's up/down decision for the block.
    int phi_up = !!UP_entry[slidx]->test(b->_pre_order);

    // Force down if double-spilling live range
    if( lrgs(lidx)._was_spilled1 )
      phi_up = false;

    // When splitting a Phi we can split it normally or "inverted".
    // An inverted split makes the splits target the Phi's UP/DOWN
    // sense inverted; then the Phi is followed by a final def-side
    // split to invert back.  It changes which blocks the spill code
    // goes in.

    // Walk the predecessor blocks and assign the reaching def to the Phi.
    // Split Phi nodes by placing USE side splits wherever the reaching
    // DEF has the wrong UP/DOWN value.
    for( uint i = 1; i < b->num_preds(); i++ ) {
      // Get predecessor block pre-order number
      Block *pred = _cfg.get_block_for_node(b->pred(i));
      pidx = pred->_pre_order;
      // Grab reaching def
      Node *def = Reaches[pidx][slidx];
      assert( def, "must have reaching def" );
      // If input up/down sense and reg-pressure DISagree
      if (def->rematerialize()) {
        // Place the rematerialized node above any MSCs created during
        // phi node splitting.  end_idx points at the insertion point
        // so look at the node before it.
        int insert = pred->end_idx();
        while (insert >= 1 &&
               pred->get_node(insert - 1)->is_SpillCopy() &&
               _lrg_map.find(pred->get_node(insert - 1)) >= lrgs_before_phi_split) {
          insert--;
        }
        // Since the def cannot contain any live range input, we can pass
        // in NULL as the Reachblock parameter.
        def = split_Rematerialize(def, pred, insert, maxlrg, splits, slidx, lrg2reach, NULL, false);
        if (!def) {
          return 0;    // Bail out
        }
      }
      // Update the Phi's input edge array
      phi->set_req(i,def);
      // Grab the UP/DOWN sense for the input
      u1 = UP[pidx][slidx];
      if( u1 != (phi_up != 0)) {
        maxlrg = split_USE(def, b, phi, i, maxlrg, !u1, false, splits,slidx);
        // If it wasn't split bail
        if (!maxlrg) {
          return 0;
        }
      }
    }  // End for all inputs to the Phi
  }  // End for all Phi Nodes
  // Update _maxlrg to save Union asserts
  _lrg_map.set_max_lrg_id(maxlrg);


  //----------PASS 3----------
  // Pass over all Phi's to union the live ranges
  for( insidx = 0; insidx < phis->size(); insidx++ ) {
    Node *phi = phis->at(insidx);
    assert(phi->is_Phi(),"This list must only contain Phi Nodes");
    // Walk all inputs to Phi and Union input live range with Phi live range
    for( uint i = 1; i < phi->req(); i++ ) {
      // Grab the input node
      Node *n = phi->in(i);
      assert(n, "node should exist");
      uint lidx = _lrg_map.find(n);
      uint pidx = _lrg_map.find(phi);
      if (lidx < pidx) {
        Union(n, phi);
      }
      else if(lidx > pidx) {
        Union(phi, n);
      }
    }  // End for all inputs to the Phi Node
  }  // End for all Phi Nodes
  // Now union all two address instructions
  for (insidx = 0; insidx < defs->size(); insidx++) {
    // Grab the def
    n1 = defs->at(insidx);
    // Set new lidx for DEF & handle 2-addr instructions
    if (n1->is_Mach() && ((twoidx = n1->as_Mach()->two_adr()) != 0)) {
      assert(_lrg_map.find(n1->in(twoidx)) < maxlrg,"Assigning bad live range index");
      // Union the input and output live ranges
      uint lr1 = _lrg_map.find(n1);
      uint lr2 = _lrg_map.find(n1->in(twoidx));
      if (lr1 < lr2) {
        Union(n1, n1->in(twoidx));
      }
      else if (lr1 > lr2) {
        Union(n1->in(twoidx), n1);
      }
    }  // End if two address
  }  // End for all defs
  // DEBUG
#ifdef ASSERT
  // Validate all live range index assignments
  for (bidx = 0; bidx < _cfg.number_of_blocks(); bidx++) {
    b = _cfg.get_block(bidx);
    for (insidx = 0; insidx <= b->end_idx(); insidx++) {
      Node *n = b->get_node(insidx);
      uint defidx = _lrg_map.find(n);
      assert(defidx < _lrg_map.max_lrg_id(), "Bad live range index in Split");
      assert(defidx < maxlrg,"Bad live range index in Split");
    }
  }
  // Issue a warning if splitting made no progress
  int noprogress = 0;
  for (slidx = 0; slidx < spill_cnt; slidx++) {
    if (PrintOpto && WizardMode && splits.at(slidx) == 0) {
      tty->print_cr("Failed to split live range %d", lidxs.at(slidx));
      //BREAKPOINT;
    }
    else {
      noprogress++;
    }
  }
  if(!noprogress) {
    tty->print_cr("Failed to make progress in Split");
    //BREAKPOINT;
  }
#endif
  // Return updated count of live ranges
  return maxlrg;
}
1322 int insert = pred->end_idx(); 1323 while (insert >= 1 && 1324 pred->get_node(insert - 1)->is_SpillCopy() && 1325 _lrg_map.find(pred->get_node(insert - 1)) >= lrgs_before_phi_split) { 1326 insert--; 1327 } 1328 // since the def cannot contain any live range input, we can pass in NULL as Reachlock parameter 1329 def = split_Rematerialize(def, pred, insert, maxlrg, splits, slidx, lrg2reach, NULL, false); 1330 if (!def) { 1331 return 0; // Bail out 1332 } 1333 } 1334 // Update the Phi's input edge array 1335 phi->set_req(i,def); 1336 // Grab the UP/DOWN sense for the input 1337 u1 = UP[pidx][slidx]; 1338 if( u1 != (phi_up != 0)) { 1339 maxlrg = split_USE(def, b, phi, i, maxlrg, !u1, false, splits,slidx); 1340 // If it wasn't split bail 1341 if (!maxlrg) { 1342 return 0; 1343 } 1344 } 1345 } // End for all inputs to the Phi 1346 } // End for all Phi Nodes 1347 // Update _maxlrg to save Union asserts 1348 _lrg_map.set_max_lrg_id(maxlrg); 1349 1350 1351 //----------PASS 3---------- 1352 // Pass over all Phi's to union the live ranges 1353 for( insidx = 0; insidx < phis->size(); insidx++ ) { 1354 Node *phi = phis->at(insidx); 1355 assert(phi->is_Phi(),"This list must only contain Phi Nodes"); 1356 // Walk all inputs to Phi and Union input live range with Phi live range 1357 for( uint i = 1; i < phi->req(); i++ ) { 1358 // Grab the input node 1359 Node *n = phi->in(i); 1360 assert(n, "node should exist"); 1361 uint lidx = _lrg_map.find(n); 1362 uint pidx = _lrg_map.find(phi); 1363 if (lidx < pidx) { 1364 Union(n, phi); 1365 } 1366 else if(lidx > pidx) { 1367 Union(phi, n); 1368 } 1369 } // End for all inputs to the Phi Node 1370 } // End for all Phi Nodes 1371 // Now union all two address instructions 1372 for (insidx = 0; insidx < defs->size(); insidx++) { 1373 // Grab the def 1374 n1 = defs->at(insidx); 1375 // Set new lidx for DEF & handle 2-addr instructions 1376 if (n1->is_Mach() && ((twoidx = n1->as_Mach()->two_adr()) != 0)) { 1377 assert(_lrg_map.find(n1->in(twoidx)) < maxlrg,"Assigning bad live range index"); 1378 // Union the input and output live ranges 1379 uint lr1 = _lrg_map.find(n1); 1380 uint lr2 = _lrg_map.find(n1->in(twoidx)); 1381 if (lr1 < lr2) { 1382 Union(n1, n1->in(twoidx)); 1383 } 1384 else if (lr1 > lr2) { 1385 Union(n1->in(twoidx), n1); 1386 } 1387 } // End if two address 1388 } // End for all defs 1389 // DEBUG 1390 #ifdef ASSERT 1391 // Validate all live range index assignments 1392 for (bidx = 0; bidx < _cfg.number_of_blocks(); bidx++) { 1393 b = _cfg.get_block(bidx); 1394 for (insidx = 0; insidx <= b->end_idx(); insidx++) { 1395 Node *n = b->get_node(insidx); 1396 uint defidx = _lrg_map.find(n); 1397 assert(defidx < _lrg_map.max_lrg_id(), "Bad live range index in Split"); 1398 assert(defidx < maxlrg,"Bad live range index in Split"); 1399 } 1400 } 1401 // Issue a warning if splitting made no progress 1402 int noprogress = 0; 1403 for (slidx = 0; slidx < spill_cnt; slidx++) { 1404 if (PrintOpto && WizardMode && splits.at(slidx) == 0) { 1405 tty->print_cr("Failed to split live range %d", lidxs.at(slidx)); 1406 //BREAKPOINT; 1407 } 1408 else { 1409 noprogress++; 1410 } 1411 } 1412 if(!noprogress) { 1413 tty->print_cr("Failed to make progress in Split"); 1414 //BREAKPOINT; 1415 } 1416 #endif 1417 // Return updated count of live ranges 1418 return maxlrg; 1419 }