/*
 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/chaitin.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"

//------------------------------Split--------------------------------------
// Walk the graph in RPO and for each lrg which spills, propagate reaching
// definitions.  During propagation, split the live range around regions of
// High Register Pressure (HRP).  If a Def is in a region of Low Register
// Pressure (LRP), it will not get spilled until we encounter a region of
// HRP between it and one of its uses.  We will spill at the transition
// point between LRP and HRP.  Uses in the HRP region will use the spilled
// Def.  The first Use outside the HRP region will generate a SpillCopy to
// hoist the live range back up into a register, and all subsequent uses
// will use that new Def until another HRP region is encountered.  Defs in
// HRP regions will get trailing SpillCopies to push the LRG down into the
// stack immediately.
//
// As a side effect, unlink from (hence make dead) coalesced copies.
//
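// Illustrative sketch of the heuristic (not from the original sources):
// for a live range with one Def and three Uses laid out across pressure
// regions,
//
//   Def (LRP) ... Use1 (LRP) | Use2 (HRP) | Use3 (LRP) ...
//                            ^            ^
//            SpillCopy pushes the value   SpillCopy hoists the value back
//            DOWN to the stack at the     UP into a register; Use3 and any
//            LRP->HRP transition; Use2    later uses see this new Def until
//            reads the stack copy.        the next HRP region.
//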

static const char out_of_nodes[] = "out of nodes during split";

static bool contains_no_live_range_input(const Node* def) {
  for (uint i = 1; i < def->req(); ++i) {
    if (def->in(i) != NULL && def->in_RegMask(i).is_NotEmpty()) {
      return false;
    }
  }
  return true;
}

//------------------------------get_spillcopy_wide-----------------------------
// Get a SpillCopy node with wide-enough masks.  Use the 'wide-mask', the
// wide ideal-register spill-mask if possible.  If the 'wide-mask' does
// not cover the input (or output), use the input (or output) mask instead.
Node *PhaseChaitin::get_spillcopy_wide( Node *def, Node *use, uint uidx ) {
  // If ideal reg doesn't exist we've got a bad schedule happening
  // that is forcing us to spill something that isn't spillable.
  // Bail rather than abort
  int ireg = def->ideal_reg();
  if( ireg == 0 || ireg == Op_RegFlags ) {
    assert(false, "attempted to spill a non-spillable item");
    C->record_method_not_compilable("attempted to spill a non-spillable item");
    return NULL;
  }
  if (C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {
    return NULL;
  }
  const RegMask *i_mask = &def->out_RegMask();
  const RegMask *w_mask = C->matcher()->idealreg2spillmask[ireg];
  const RegMask *o_mask = use ? &use->in_RegMask(uidx) : w_mask;
  const RegMask *w_i_mask = w_mask->overlap( *i_mask ) ? w_mask : i_mask;
  const RegMask *w_o_mask;

  int num_regs = RegMask::num_registers(ireg);
  bool is_vect = RegMask::is_vector(ireg);
  if( w_mask->overlap( *o_mask ) &&     // Overlap AND
      ((num_regs == 1)                  // Single use or aligned
        || is_vect                      // or vector
        || !is_vect && o_mask->is_aligned_pairs()) ) {
    assert(!is_vect || o_mask->is_aligned_sets(num_regs), "vectors are aligned");
    // Don't come here for mis-aligned doubles
    w_o_mask = w_mask;
  } else {                      // wide ideal mask does not overlap with o_mask
    // Mis-aligned doubles come here and XMM->FPR moves on x86.
    w_o_mask = o_mask;          // Must target desired registers
    // Does the ideal-reg-mask overlap with o_mask?  I.e., can I use
    // a reg-reg move or do I need a trip across register classes
    // (and thus through memory)?
    if( !C->matcher()->idealreg2regmask[ireg]->overlap( *o_mask) && o_mask->is_UP() )
      // Here we assume a trip through memory is required.
      w_i_mask = &C->FIRST_STACK_mask();
  }
  return new (C) MachSpillCopyNode( def, *w_i_mask, *w_o_mask );
}
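
// Example of the mask choice above (illustrative only): spilling a double
// whose use demands an aligned register pair.  If the wide spill mask
// overlaps the use mask and the pair is aligned, the copy gets the wide
// mask and may be placed anywhere the ideal register can spill.  For a
// mis-aligned pair (or an XMM->FPR move on x86) the copy is pinned to the
// use's own mask instead; and if even the ideal register mask cannot reach
// that mask while the use is UP, the input side is routed through
// FIRST_STACK_mask() - i.e. a trip through memory.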

//------------------------------insert_proj------------------------------------
// Insert the spill at chosen location.  Skip over any intervening Proj's or
// Phis.  Skip over a CatchNode and projs, inserting in the fall-through block
// instead.  Update high-pressure indices.  Create a new live range.
void PhaseChaitin::insert_proj( Block *b, uint i, Node *spill, uint maxlrg ) {
  // Skip intervening ProjNodes.  Do not insert between a ProjNode and
  // its definer.
  while( i < b->number_of_nodes() &&
         (b->get_node(i)->is_Proj() ||
          b->get_node(i)->is_Phi() ) )
    i++;

  // Do not insert between a call and its Catch
  if( b->get_node(i)->is_Catch() ) {
    // Put the instruction at the top of the fall-thru block.
    // Find the fall-thru projection
    while( 1 ) {
      const CatchProjNode *cp = b->get_node(++i)->as_CatchProj();
      if( cp->_con == CatchProjNode::fall_through_index )
        break;
    }
    int sidx = i - b->end_idx()-1;
    b = b->_succs[sidx];        // Switch to successor block
    i = 1;                      // Right at start of block
  }

  b->insert_node(spill, i);         // Insert node in block
  _cfg.map_node_to_block(spill, b); // Update node->block mapping to reflect
  // Adjust the point where we go hi-pressure
  if( i <= b->_ihrp_index ) b->_ihrp_index++;
  if( i <= b->_fhrp_index ) b->_fhrp_index++;

  // Assign a new Live Range Number to the SpillCopy and grow
  // the node->live range mapping.
  new_lrg(spill,maxlrg);
}

//------------------------------split_DEF--------------------------------------
// There are four categories of Split; UP/DOWN x DEF/USE
// Only three of these really occur as DOWN/USE will always color
// Any Split with a DEF cannot CISC-Spill now.  Thus we need
// two helper routines, one for Split DEFS (insert after instruction),
// one for Split USES (insert before instruction).  DEF insertion
// happens inside Split, where the Leaveblock array is updated.
uint PhaseChaitin::split_DEF( Node *def, Block *b, int loc, uint maxlrg, Node **Reachblock, Node **debug_defs, GrowableArray<uint> splits, int slidx ) {
#ifdef ASSERT
  // Increment the counter for this lrg
  splits.at_put(slidx, splits.at(slidx)+1);
#endif
  // If we are spilling the memory op for an implicit null check, at the
  // null check location (ie - null check is in HRP block) we need to do
  // the null-check first, then spill-down in the following block.
  // (The implicit_null_check function ensures the use is also dominated
  // by the branch-not-taken block.)
  Node *be = b->end();
  if( be->is_MachNullCheck() && be->in(1) == def && def == b->get_node(loc)) {
    // Spill goes in the branch-not-taken block
    b = b->_succs[b->get_node(b->end_idx()+1)->Opcode() == Op_IfTrue];
    loc = 0;                    // Just past the Region
  }
  assert( loc >= 0, "must insert past block head" );

  // Get a def-side SpillCopy
  Node *spill = get_spillcopy_wide(def,NULL,0);
  // Did we fail to split?  Then bail.
  if (!spill) {
    return 0;
  }

  // Insert the spill at chosen location
  insert_proj( b, loc+1, spill, maxlrg++);

  // Insert new node into Reaches array
  Reachblock[slidx] = spill;
  // Update debug list of reaching down definitions by adding this one
  debug_defs[slidx] = spill;

  // return updated count of live ranges
  return maxlrg;
}
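
// Illustrative scenario for the MachNullCheck case above (hypothetical
// block layout): if block B ends in a null check whose memory op is the
// Def being spilled, the SpillCopy cannot be placed after the Def in B -
// the check must execute first - so it is placed at the top of the
// branch-not-taken successor instead, which is why 'b' and 'loc' are
// redirected before insert_proj() runs.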

//------------------------------split_USE--------------------------------------
// Splits at uses can involve redefining the LRG, so no CISC Spilling there.
// Debug uses want to know if def is already stack enabled.
uint PhaseChaitin::split_USE( Node *def, Block *b, Node *use, uint useidx, uint maxlrg, bool def_down, bool cisc_sp, GrowableArray<uint> splits, int slidx ) {
#ifdef ASSERT
  // Increment the counter for this lrg
  splits.at_put(slidx, splits.at(slidx)+1);
#endif

  // Some setup stuff for handling debug node uses
  JVMState* jvms = use->jvms();
  uint debug_start = jvms ? jvms->debug_start() : 999999;
  uint debug_end   = jvms ? jvms->debug_end()   : 999999;

  //-------------------------------------------
  // Check for use of debug info
  if (useidx >= debug_start && useidx < debug_end) {
    // Actually it's perfectly legal for constant debug info to appear, just
    // unlikely.  In this case the optimizer left a ConI of a 4 as both
    // inputs to a Phi with only a debug use.  It's a single-def live range
    // of a rematerializable value.  The live range spills, rematerializes
    // and now the ConI directly feeds into the debug info.
    // assert(!def->is_Con(), "constant debug info already constructed directly");

    // Special split handling for Debug Info
    // If DEF is DOWN, just hook the edge and return
    // If DEF is UP, Split it DOWN for this USE.
    if( def->is_Mach() ) {
      if( def_down ) {
        // DEF is DOWN, so connect USE directly to the DEF
        use->set_req(useidx, def);
      } else {
        // Block and index where the use occurs.
        Block *b = _cfg.get_block_for_node(use);
        // Put the clone just prior to use
        int bindex = b->find_node(use);
        // DEF is UP, so must copy it DOWN and hook in USE
        // Insert SpillCopy before the USE, which uses DEF as its input,
        // and defs a new live range, which is used by this node.
        Node *spill = get_spillcopy_wide(def,use,useidx);
        // did we fail to split?
        if (!spill) {
          // Bail
          return 0;
        }
        // insert into basic block
        insert_proj( b, bindex, spill, maxlrg++ );
        // Use the new split
        use->set_req(useidx,spill);
      }
      // No further split handling needed for this use
      return maxlrg;
    }  // End special splitting for debug info live range
  }  // If debug info

  // CISC-SPILLING
  // Finally, check to see if USE is CISC-Spillable, and if so,
  // gather_lrg_masks will add the flags bit to its mask, and
  // no use side copy is needed.  This frees up the live range
  // register choices without causing copy coalescing, etc.
  if( UseCISCSpill && cisc_sp ) {
    int inp = use->cisc_operand();
    if( inp != AdlcVMDeps::Not_cisc_spillable )
      // Convert operand number to edge index number
      inp = use->as_Mach()->operand_index(inp);
    if( inp == (int)useidx ) {
      use->set_req(useidx, def);
#ifndef PRODUCT
      if( TraceCISCSpill ) {
        tty->print("  set_split: ");
        use->dump();
      }
#endif
      return maxlrg;
    }
  }

  //-------------------------------------------
  // Insert a Copy before the use

  // Block and index where the use occurs.
  int bindex;
  // Phi input spill-copys belong at the end of the prior block
  if( use->is_Phi() ) {
    b = _cfg.get_block_for_node(b->pred(useidx));
    bindex = b->end_idx();
  } else {
    // Put the clone just prior to use
    bindex = b->find_node(use);
  }

  Node *spill = get_spillcopy_wide( def, use, useidx );
  if( !spill ) return 0;        // Bailed out
  // Insert SpillCopy before the USE, which uses the reaching DEF as
  // its input, and defs a new live range, which is used by this node.
  insert_proj( b, bindex, spill, maxlrg++ );
  // Use the spill/clone
  use->set_req(useidx,spill);

  // return updated live range count
  return maxlrg;
}
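
// Placement note with a small example (assumed, for illustration): for a
// Phi use such as
//   phi = Phi(region, v1, v2)
// a copy feeding input 2 cannot sit next to the Phi itself; split_USE
// places it at end_idx() of predecessor block 2, so the value is in the
// required location on that incoming edge before control merges.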

//------------------------------clone_node----------------------------
// Clone node with anti dependence check.
Node* clone_node(Node* def, Block *b, Compile* C) {
  if (def->needs_anti_dependence_check()) {
#ifdef ASSERT
    if (Verbose) {
      tty->print_cr("RA attempts to clone node with anti_dependence:");
      def->dump(-1); tty->cr();
      tty->print_cr("into block:");
      b->dump();
    }
#endif
    if (C->subsume_loads() == true && !C->failing()) {
      // Retry with subsume_loads == false
      // If this is the first failure, the sentinel string will "stick"
      // to the Compile object, and the C2Compiler will see it and retry.
      C->record_failure(C2Compiler::retry_no_subsuming_loads());
    } else {
      // Bailout without retry
      C->record_method_not_compilable("RA Split failed: attempt to clone node with anti_dependence");
    }
    return 0;
  }
  return def->clone();
}

//------------------------------split_Rematerialize----------------------------
// Clone a local copy of the def.
Node *PhaseChaitin::split_Rematerialize( Node *def, Block *b, uint insidx, uint &maxlrg, GrowableArray<uint> splits, int slidx, uint *lrg2reach, Node **Reachblock, bool walkThru ) {
  // The input live ranges will be stretched to the site of the new
  // instruction.  They might be stretched past a def and will thus
  // have the old and new values of the same live range alive at the
  // same time - a definite no-no.  Split out private copies of
  // the inputs.
  if( def->req() > 1 ) {
    for( uint i = 1; i < def->req(); i++ ) {
      Node *in = def->in(i);
      // Check for single-def (LRG cannot be redefined)
      uint lidx = _lrg_map.live_range_id(in);
      if (lidx >= _lrg_map.max_lrg_id()) {
        continue; // Value is a recent spill-copy
      }
      if (lrgs(lidx).is_singledef()) {
        continue;
      }

      Block *b_def = _cfg.get_block_for_node(def);
      int idx_def = b_def->find_node(def);
      Node *in_spill = get_spillcopy_wide( in, def, i );
      if( !in_spill ) return 0; // Bailed out
      insert_proj(b_def,idx_def,in_spill,maxlrg++);
      if( b_def == b )
        insidx++;
      def->set_req(i,in_spill);
    }
  }

  Node *spill = clone_node(def, b, C);
  if (spill == NULL || C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {
    // Check when generating nodes
    return 0;
  }

  // See if any inputs are currently being spilled, and take the
  // latest copy of spilled inputs.
  if( spill->req() > 1 ) {
    for( uint i = 1; i < spill->req(); i++ ) {
      Node *in = spill->in(i);
      uint lidx = _lrg_map.find_id(in);

      // Walk backwards thru spill copy node intermediates
      if (walkThru) {
        while (in->is_SpillCopy() && lidx >= _lrg_map.max_lrg_id()) {
          in = in->in(1);
          lidx = _lrg_map.find_id(in);
        }

        if (lidx < _lrg_map.max_lrg_id() && lrgs(lidx).is_multidef()) {
          // walkThru found a multidef LRG, which is unsafe to use, so
          // just keep the original def used in the clone.
          in = spill->in(i);
          lidx = _lrg_map.find_id(in);
        }
      }

      if (lidx < _lrg_map.max_lrg_id() && lrgs(lidx).reg() >= LRG::SPILL_REG) {
        assert(Reachblock != NULL, "Reachblock must be non-NULL");
        Node *rdef = Reachblock[lrg2reach[lidx]];
        if (rdef) {
          spill->set_req(i, rdef);
        }
      }
    }
  }

  assert( spill->out_RegMask().is_UP(), "rematerialize to a reg" );
  // Rematerialized op is def->spilled+1
  set_was_spilled(spill);
  if( _spilled_once.test(def->_idx) )
    set_was_spilled(spill);

  insert_proj( b, insidx, spill, maxlrg++ );
#ifdef ASSERT
  // Increment the counter for this lrg
  splits.at_put(slidx, splits.at(slidx)+1);
#endif
  // See if the cloned def kills any flags, and copy those kills as well
  uint i = insidx+1;
  int found_projs = clone_projs( b, i, def, spill, maxlrg);
  if (found_projs > 0) {
    // Adjust the point where we go hi-pressure
    if (i <= b->_ihrp_index) {
      b->_ihrp_index += found_projs;
    }
    if (i <= b->_fhrp_index) {
      b->_fhrp_index += found_projs;
    }
  }

  return spill;
}
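
// Worked example (illustrative): a spilled, rematerializable constant
// (e.g. the ConI mentioned in split_USE above) never takes a store/load
// pair; each use instead gets a fresh clone of the defining instruction
// just before it.  The clone's inputs are retargeted to the latest
// reaching copies via Reachblock, and if the clone also kills flags, the
// flag-kill projections are cloned alongside it by clone_projs().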

//------------------------------is_high_pressure-------------------------------
// Function to compute whether or not this live range is "high pressure"
// in this block - whether it spills eagerly or not.
bool PhaseChaitin::is_high_pressure( Block *b, LRG *lrg, uint insidx ) {
  if( lrg->_was_spilled1 ) return true;
  // Forced spilling due to conflict?  Then split only at binding uses
  // or defs, not for supposed capacity problems.
  // CNC - Turned off 7/8/99, causes too much spilling
  // if( lrg->_is_bound ) return false;

  // Use float pressure numbers for vectors.
  bool is_float_or_vector = lrg->_is_float || lrg->_is_vector;
  // Not yet reached the high-pressure cutoff point, so low pressure
  uint hrp_idx = is_float_or_vector ? b->_fhrp_index : b->_ihrp_index;
  if( insidx < hrp_idx ) return false;
  // Register pressure for the block as a whole depends on reg class
  int block_pres = is_float_or_vector ? b->_freg_pressure : b->_reg_pressure;
  // Bound live ranges will split at the binding points first;
  // Intermediate splits should assume the live range's register set
  // got "freed up" and that num_regs will become INT_PRESSURE.
  int bound_pres = is_float_or_vector ? FLOATPRESSURE : INTPRESSURE;
  // Effective register pressure limit.
  int lrg_pres = (lrg->get_invalid_mask_size() > lrg->num_regs())
    ? (lrg->get_invalid_mask_size() >> (lrg->num_regs()-1)) : bound_pres;
  // High pressure if block pressure requires more register freedom
  // than live range has.
  return block_pres >= lrg_pres;
}
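
// Numeric example of the limit above (assumed values): a live range with
// num_regs() == 2 and get_invalid_mask_size() == 14 yields
// lrg_pres = 14 >> (2-1) = 7, so any block whose pressure reaches 7 counts
// as high pressure for it.  A live range whose invalid mask is no larger
// than its register count falls back to INTPRESSURE/FLOATPRESSURE instead.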

//------------------------------prompt_use---------------------------------
// True if lidx is used before any real register is def'd in the block
bool PhaseChaitin::prompt_use( Block *b, uint lidx ) {
  if (lrgs(lidx)._was_spilled2) {
    return false;
  }

  // Scan block for 1st use.
  for( uint i = 1; i <= b->end_idx(); i++ ) {
    Node *n = b->get_node(i);
    // Ignore PHI use, these can be up or down
    if (n->is_Phi()) {
      continue;
    }
    for (uint j = 1; j < n->req(); j++) {
      if (_lrg_map.find_id(n->in(j)) == lidx) {
        return true;            // Found 1st use!
      }
    }
    if (n->out_RegMask().is_NotEmpty()) {
      return false;
    }
  }
  return false;
}

//------------------------------Split--------------------------------------
//----------Split Routine----------
// ***** NEW SPLITTING HEURISTIC *****
// DEFS: If the DEF is in a High Register Pressure(HRP) Block, split there.
//       Else, no split unless there is a HRP block between a DEF and
//       one of its uses, and then split at the HRP block.
//
// USES: If USE is in HRP, split at use to leave main LRG on stack.
//       Else, hoist LRG back up to register only (ie - split is also DEF)
// We will compute a new maxlrg as we go
uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
  NOT_PRODUCT( Compile::TracePhase t3("regAllocSplit", &_t_regAllocSplit, TimeCompiler); )

  // Free thread local resources used by this method on exit.
  ResourceMark rm(split_arena);

  uint                 bidx, pidx, slidx, insidx, inpidx, twoidx;
  uint                 non_phi = 1, spill_cnt = 0;
  Node               **Reachblock;
  Node                *n1, *n2, *n3;
  Node_List           *defs,*phis;
  bool                *UPblock;
  bool                 u1, u2, u3;
  Block               *b, *pred;
  PhiNode             *phi;
  GrowableArray<uint>  lidxs(split_arena, maxlrg, 0, 0);

  // Array of counters to count splits per live range
  GrowableArray<uint> splits(split_arena, maxlrg, 0, 0);

#define NEW_SPLIT_ARRAY(type, size)\
  (type*) split_arena->allocate_bytes((size) * sizeof(type))

  //----------Setup Code----------
  // Create a convenient mapping from lrg numbers to reaches/leaves indices
  uint *lrg2reach = NEW_SPLIT_ARRAY(uint, maxlrg);
  // Keep track of DEFS & Phis for later passes
  defs = new Node_List();
  phis = new Node_List();
  // Gather info on which LRG's are spilling, and build maps
  for (bidx = 1; bidx < maxlrg; bidx++) {
    if (lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG) {
      assert(!lrgs(bidx).mask().is_AllStack(),"AllStack should color");
      lrg2reach[bidx] = spill_cnt;
      spill_cnt++;
      lidxs.append(bidx);
#ifdef ASSERT
      // Initialize the split counts to zero
      splits.append(0);
#endif
#ifndef PRODUCT
      if( PrintOpto && WizardMode && lrgs(bidx)._was_spilled1 )
        tty->print_cr("Warning, 2nd spill of L%d",bidx);
#endif
    }
  }

  // Create side arrays for propagating reaching defs info.
  // Each block needs a node pointer for each spilling live range for the
  // Def which is live into the block.  Phi nodes handle multiple input
  // Defs by querying the output of their predecessor blocks and resolving
  // them to a single Def at the phi.  The pointer is updated for each
  // Def in the block, and then becomes the output for the block when
  // processing of the block is complete.  We also need to track whether
  // a Def is UP or DOWN.  UP means that it should get a register (ie -
  // it is always in LRP regions), and DOWN means that it is probably
  // on the stack (ie - it crosses HRP regions).
  Node ***Reaches     = NEW_SPLIT_ARRAY( Node**, _cfg.number_of_blocks() + 1);
  bool  **UP          = NEW_SPLIT_ARRAY( bool*, _cfg.number_of_blocks() + 1);
  Node  **debug_defs  = NEW_SPLIT_ARRAY( Node*, spill_cnt );
  VectorSet **UP_entry= NEW_SPLIT_ARRAY( VectorSet*, spill_cnt );

  // Initialize Reaches & UP
  for (bidx = 0; bidx < _cfg.number_of_blocks() + 1; bidx++) {
    Reaches[bidx]     = NEW_SPLIT_ARRAY( Node*, spill_cnt );
    UP[bidx]          = NEW_SPLIT_ARRAY( bool, spill_cnt );
    Node **Reachblock = Reaches[bidx];
    bool *UPblock     = UP[bidx];
    for( slidx = 0; slidx < spill_cnt; slidx++ ) {
      UPblock[slidx] = true;      // Assume they start in registers
      Reachblock[slidx] = NULL;   // Assume that no def is present
    }
  }

#undef NEW_SPLIT_ARRAY

  // Initialize to array of empty vectorsets
  for( slidx = 0; slidx < spill_cnt; slidx++ )
    UP_entry[slidx] = new VectorSet(split_arena);

  //----------PASS 1----------
  //----------Propagation & Node Insertion Code----------
  // Walk the Blocks in RPO for DEF & USE info
  for( bidx = 0; bidx < _cfg.number_of_blocks(); bidx++ ) {

    if (C->check_node_count(spill_cnt, out_of_nodes)) {
      return 0;
    }

    b  = _cfg.get_block(bidx);
    // Reaches & UP arrays for this block
    Reachblock = Reaches[b->_pre_order];
    UPblock    = UP[b->_pre_order];
    // Reset counter of start of non-Phi nodes in block
    non_phi = 1;
    //----------Block Entry Handling----------
    // Check for need to insert a new phi
    // Cycle through this block's predecessors, collecting Reaches
    // info for each spilled LRG.  If they are identical, no phi is
    // needed.  If they differ, check for a phi, and insert if missing,
    // or update edges if present.  Set current block's Reaches set to
    // be either the phi's or the reaching def, as appropriate.
    // If no Phi is needed, check if the LRG needs to spill on entry
    // to the block due to HRP.
    for( slidx = 0; slidx < spill_cnt; slidx++ ) {
      // Grab the live range number
      uint lidx = lidxs.at(slidx);
      // Do not bother splitting or putting in Phis for single-def
      // rematerialized live ranges.  This happens a lot to constants
      // with long live ranges.
      if( lrgs(lidx).is_singledef() &&
          lrgs(lidx)._def->rematerialize() ) {
        // reset the Reaches & UP entries
        Reachblock[slidx] = lrgs(lidx)._def;
        UPblock[slidx] = true;
        // Record following instruction in case 'n' rematerializes and
        // kills flags
        Block *pred1 = _cfg.get_block_for_node(b->pred(1));
        continue;
      }

      // Initialize needs_phi and needs_split
      bool needs_phi = false;
      bool needs_split = false;
      bool has_phi = false;
      // Walk the predecessor blocks to check inputs for that live range
      // Grab predecessor block header
      n1 = b->pred(1);
      // Grab the appropriate reaching def info for inpidx
      pred = _cfg.get_block_for_node(n1);
      pidx = pred->_pre_order;
      Node **Ltmp = Reaches[pidx];
      bool  *Utmp = UP[pidx];
      n1 = Ltmp[slidx];
      u1 = Utmp[slidx];
      // Initialize node for saving type info
      n3 = n1;
      u3 = u1;

      // Compare inputs to see if a Phi is needed
      for( inpidx = 2; inpidx < b->num_preds(); inpidx++ ) {
        // Grab predecessor block headers
        n2 = b->pred(inpidx);
        // Grab the appropriate reaching def info for inpidx
        pred = _cfg.get_block_for_node(n2);
        pidx = pred->_pre_order;
        Ltmp = Reaches[pidx];
        Utmp = UP[pidx];
        n2 = Ltmp[slidx];
        u2 = Utmp[slidx];
        // For each LRG, decide if a phi is necessary
        if( n1 != n2 ) {
          needs_phi = true;
        }
        // See if the phi has mismatched inputs, UP vs. DOWN
        if( n1 && n2 && (u1 != u2) ) {
          needs_split = true;
        }
        // Move n2/u2 to n1/u1 for next iteration
        n1 = n2;
        u1 = u2;
        // Preserve a non-NULL predecessor for later type referencing
        if( (n3 == NULL) && (n2 != NULL) ){
          n3 = n2;
          u3 = u2;
        }
      }  // End for all potential Phi inputs

      // check block for appropriate phinode & update edges
      for( insidx = 1; insidx <= b->end_idx(); insidx++ ) {
        n1 = b->get_node(insidx);
        // bail if this is not a phi
        phi = n1->is_Phi() ? n1->as_Phi() : NULL;
        if( phi == NULL ) {
          // Keep track of index of first non-PhiNode instruction in block
          non_phi = insidx;
          // break out of the for loop as we have handled all phi nodes
          break;
        }
        // must be looking at a phi
        if (_lrg_map.find_id(n1) == lidxs.at(slidx)) {
          // found the necessary phi
          needs_phi = false;
          has_phi = true;
          // initialize the Reaches entry for this LRG
          Reachblock[slidx] = phi;
          break;
        }  // end if found correct phi
      }  // end for all phi's

      // If a phi is needed or exists, check for it
      if( needs_phi || has_phi ) {
        // add new phinode if one not already found
        if( needs_phi ) {
          // create a new phi node and insert it into the block
          // type is taken from left over pointer to a predecessor
          assert(n3,"No non-NULL reaching DEF for a Phi");
          phi = new (C) PhiNode(b->head(), n3->bottom_type());
          // initialize the Reaches entry for this LRG
          Reachblock[slidx] = phi;

          // add node to block & node_to_block mapping
          insert_proj(b, insidx++, phi, maxlrg++);
          non_phi++;
          // Reset new phi's mapping to be the spilling live range
          _lrg_map.map(phi->_idx, lidx);
          assert(_lrg_map.find_id(phi) == lidx, "Bad update on Union-Find mapping");
        }  // end if not found correct phi
        // Here you have either found or created the Phi, so record it
        assert(phi != NULL,"Must have a Phi Node here");
        phis->push(phi);
        // PhiNodes should either force the LRG UP or DOWN depending
        // on its inputs and the register pressure in the Phi's block.
        UPblock[slidx] = true;  // Assume new DEF is UP
        // If entering a high-pressure area with no immediate use,
        // assume Phi is DOWN
        if( is_high_pressure( b, &lrgs(lidx), b->end_idx()) && !prompt_use(b,lidx) )
          UPblock[slidx] = false;
        // If we are not split up/down and all inputs are down, then we
        // are down
        if( !needs_split && !u3 )
          UPblock[slidx] = false;
      }  // end if phi is needed

      // Do not need a phi, so grab the reaching DEF
      else {
        // Grab predecessor block header
        n1 = b->pred(1);
        // Grab the appropriate reaching def info for k
        pred = _cfg.get_block_for_node(n1);
        pidx = pred->_pre_order;
        Node **Ltmp = Reaches[pidx];
        bool  *Utmp = UP[pidx];
        // reset the Reaches & UP entries
        Reachblock[slidx] = Ltmp[slidx];
        UPblock[slidx] = Utmp[slidx];
      }  // end else no Phi is needed
    }  // end for all spilling live ranges
    // DEBUG
#ifndef PRODUCT
    if(trace_spilling()) {
      tty->print("\nBlock %d: ", b->_pre_order);
      tty->print("Reaching Definitions after Phi handling\n");
      for( uint x = 0; x < spill_cnt; x++ ) {
        tty->print("Spill Idx %d: UP %d: Node\n",x,UPblock[x]);
        if( Reachblock[x] )
          Reachblock[x]->dump();
        else
          tty->print("Undefined\n");
      }
    }
#endif

    //----------Non-Phi Node Splitting----------
    // Since phi-nodes have now been handled, the Reachblock array for this
    // block is initialized with the correct starting value for the defs which
    // reach non-phi instructions in this block.  Thus, process non-phi
    // instructions normally, inserting SpillCopy nodes for all spill
    // locations.

    // Memoize any DOWN reaching definitions for use as DEBUG info
    for( insidx = 0; insidx < spill_cnt; insidx++ ) {
      debug_defs[insidx] = (UPblock[insidx]) ? NULL : Reachblock[insidx];
      if( UPblock[insidx] )     // Memoize UP decision at block start
        UP_entry[insidx]->set( b->_pre_order );
    }

    //----------Walk Instructions in the Block and Split----------
    // For all non-phi instructions in the block
    for( insidx = 1; insidx <= b->end_idx(); insidx++ ) {
      Node *n = b->get_node(insidx);
      // Find the defining Node's live range index
      uint defidx = _lrg_map.find_id(n);
      uint cnt = n->req();

      if (n->is_Phi()) {
        // Skip phi nodes after removing dead copies.
        if (defidx < _lrg_map.max_lrg_id()) {
          // Check for useless Phis.  These appear if we spill, then
          // coalesce away copies.  Don't touch Phis in spilling live
          // ranges; they are busy getting modified in this pass.
          if( lrgs(defidx).reg() < LRG::SPILL_REG ) {
            uint i;
            Node *u = NULL;
            // Look for the Phi merging 2 unique inputs
            for( i = 1; i < cnt; i++ ) {
              // Ignore repeats and self
              if( n->in(i) != u && n->in(i) != n ) {
                // Found a unique input
                if( u != NULL ) // If it's the 2nd, bail out
                  break;
                u = n->in(i);   // Else record it
              }
            }
            assert( u, "at least 1 valid input expected" );
            if (i >= cnt) {     // Found one unique input
              assert(_lrg_map.find_id(n) == _lrg_map.find_id(u), "should be the same lrg");
              n->replace_by(u); // Then replace with unique input
              n->disconnect_inputs(NULL, C);
              b->remove_node(insidx);
              insidx--;
              b->_ihrp_index--;
              b->_fhrp_index--;
            }
          }
        }
        continue;
      }
      assert( insidx > b->_ihrp_index ||
              (b->_reg_pressure < (uint)INTPRESSURE) ||
              b->_ihrp_index > 4000000 ||
              b->_ihrp_index >= b->end_idx() ||
              !b->get_node(b->_ihrp_index)->is_Proj(), "" );
      assert( insidx > b->_fhrp_index ||
              (b->_freg_pressure < (uint)FLOATPRESSURE) ||
              b->_fhrp_index > 4000000 ||
              b->_fhrp_index >= b->end_idx() ||
              !b->get_node(b->_fhrp_index)->is_Proj(), "" );

      // ********** Handle Crossing HRP Boundary **********
      if( (insidx == b->_ihrp_index) || (insidx == b->_fhrp_index) ) {
        for( slidx = 0; slidx < spill_cnt; slidx++ ) {
          // Check for need to split at HRP boundary - split if UP
          n1 = Reachblock[slidx];
          // bail out if no reaching DEF
          if( n1 == NULL ) continue;
          // bail out if live range is 'isolated' around inner loop
          uint lidx = lidxs.at(slidx);
          // If live range is currently UP
          if( UPblock[slidx] ) {
            // set location to insert spills at
            // SPLIT DOWN HERE - NO CISC SPILL
            if( is_high_pressure( b, &lrgs(lidx), insidx ) &&
                !n1->rematerialize() ) {
              // If there is already a valid stack definition available, use it
              if( debug_defs[slidx] != NULL ) {
                Reachblock[slidx] = debug_defs[slidx];
              }
              else {
                // Insert point is just past last use or def in the block
                int insert_point = insidx-1;
                while( insert_point > 0 ) {
                  Node *n = b->get_node(insert_point);
                  // Hit top of block?  Quit going backwards
                  if (n->is_Phi()) {
                    break;
                  }
                  // Found a def?  Better split after it.
                  if (_lrg_map.live_range_id(n) == lidx) {
                    break;
                  }
                  // Look for a use
                  uint i;
                  for( i = 1; i < n->req(); i++ ) {
                    if (_lrg_map.live_range_id(n->in(i)) == lidx) {
                      break;
                    }
                  }
                  // Found a use?  Better split after it.
                  if (i < n->req()) {
                    break;
                  }
                  insert_point--;
                }
                uint orig_eidx = b->end_idx();
                maxlrg = split_DEF( n1, b, insert_point, maxlrg, Reachblock, debug_defs, splits, slidx);
                // If it wasn't split bail
                if (!maxlrg) {
                  return 0;
                }
                // Spill of NULL check mem op goes into the following block.
                if (b->end_idx() > orig_eidx) {
                  insidx++;
                }
              }
              // This is a new DEF, so update UP
              UPblock[slidx] = false;
#ifndef PRODUCT
              // DEBUG
              if( trace_spilling() ) {
                tty->print("\nNew Split DOWN DEF of Spill Idx ");
                tty->print("%d, UP %d:\n",slidx,false);
                n1->dump();
              }
#endif
            }
          }  // end if LRG is UP
        }  // end for all spilling live ranges
        assert( b->get_node(insidx) == n, "got insidx set incorrectly" );
      }  // end if crossing HRP Boundary

      // If the LRG index is oob, then this is a new spillcopy, skip it.
      if (defidx >= _lrg_map.max_lrg_id()) {
        continue;
      }
      LRG &deflrg = lrgs(defidx);
      uint copyidx = n->is_Copy();
      // Remove coalesced copy from CFG
      if (copyidx && defidx == _lrg_map.live_range_id(n->in(copyidx))) {
        n->replace_by( n->in(copyidx) );
        n->set_req( copyidx, NULL );
        b->remove_node(insidx--);
        b->_ihrp_index--;       // Adjust the point where we go hi-pressure
        b->_fhrp_index--;
        continue;
      }

#define DERIVED 0

      // ********** Handle USES **********
      bool nullcheck = false;
      // Implicit null checks never use the spilled value
      if( n->is_MachNullCheck() )
        nullcheck = true;
      if( !nullcheck ) {
        // Search all inputs for a Spill-USE
        JVMState* jvms = n->jvms();
        uint oopoff = jvms ? jvms->oopoff() : cnt;
        uint old_last = cnt - 1;
        for( inpidx = 1; inpidx < cnt; inpidx++ ) {
          // Derived/base pairs may be added to our inputs during this loop.
          // If inpidx > old_last, then one of these new inputs is being
          // handled.  Skip the derived part of the pair, but process
          // the base like any other input.
          if (inpidx > old_last && ((inpidx - oopoff) & 1) == DERIVED) {
            continue;  // skip derived_debug added below
          }
          // Get lidx of input
          uint useidx = _lrg_map.find_id(n->in(inpidx));
          // Not a brand-new split, and it is a spill use
          if (useidx < _lrg_map.max_lrg_id() && lrgs(useidx).reg() >= LRG::SPILL_REG) {
            // Check for valid reaching DEF
            slidx = lrg2reach[useidx];
            Node *def = Reachblock[slidx];
            assert( def != NULL, "Using Undefined Value in Split()\n");

            // (+++) %%%% remove this in favor of pre-pass in matcher.cpp
            // monitor references do not care where they live, so just hook
            if ( jvms && jvms->is_monitor_use(inpidx) ) {
              // The effect of this clone is to drop the node out of the block,
              // so that the allocator does not see it anymore, and therefore
              // does not attempt to assign it a register.
              def = clone_node(def, b, C);
              if (def == NULL || C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {
                return 0;
              }
              _lrg_map.extend(def->_idx, 0);
              _cfg.map_node_to_block(def, b);
              n->set_req(inpidx, def);
              continue;
            }

            // Rematerializable?  Then clone def at use site instead
            // of store/load
            if( def->rematerialize() ) {
              int old_size = b->number_of_nodes();
              def = split_Rematerialize( def, b, insidx, maxlrg, splits, slidx, lrg2reach, Reachblock, true );
              if( !def ) return 0;  // Bail out
              insidx += b->number_of_nodes()-old_size;
            }

            MachNode *mach = n->is_Mach() ? n->as_Mach() : NULL;
            // Base pointers and oopmap references do not care where they live.
            if ((inpidx >= oopoff) ||
                (mach && mach->ideal_Opcode() == Op_AddP && inpidx == AddPNode::Base)) {
              if (def->rematerialize() && lrgs(useidx)._was_spilled2) {
                // This def has been rematerialized a couple of times without
                // progress.  It doesn't care if it lives UP or DOWN, so
                // spill it down now.
                maxlrg = split_USE(def,b,n,inpidx,maxlrg,false,false,splits,slidx);
                // If it wasn't split bail
                if (!maxlrg) {
                  return 0;
                }
                insidx++;  // Reset iterator to skip USE side split
              } else {
                // Just hook the def edge
                n->set_req(inpidx, def);
              }

              if (inpidx >= oopoff) {
                // After oopoff, we have derived/base pairs.  We must mention all
                // derived pointers here as derived/base pairs for GC.  If the
                // derived value is spilling and we have a copy both in Reachblock
                // (called here 'def') and debug_defs[slidx] we need to mention
                // both in derived/base pairs or kill one.
                Node *derived_debug = debug_defs[slidx];
                if( ((inpidx - oopoff) & 1) == DERIVED && // derived vs base?
                    mach && mach->ideal_Opcode() != Op_Halt &&
                    derived_debug != NULL &&
                    derived_debug != def ) {  // Actual 2nd value appears
                  // We have already set 'def' as a derived value.
                  // Also set debug_defs[slidx] as a derived value.
                  uint k;
                  for( k = oopoff; k < cnt; k += 2 )
                    if( n->in(k) == derived_debug )
                      break;       // Found an instance of debug derived
                  if( k == cnt ) { // No instance of debug_defs[slidx]
                    // Add a derived/base pair to cover the debug info.
                    // We have to process the added base later since it is not
                    // handled yet at this point but skip derived part.
                    assert(((n->req() - oopoff) & 1) == DERIVED,
                           "must match skip condition above");
                    n->add_req( derived_debug );   // this will be skipped above
                    n->add_req( n->in(inpidx+1) ); // this will be processed
                    // Increment cnt to handle added input edges on
                    // subsequent iterations.
                    cnt += 2;
                  }
                }
              }
              continue;
            }
            // Special logic for DEBUG info
            if( jvms && b->_freq > BLOCK_FREQUENCY(0.5) ) {
              uint debug_start = jvms->debug_start();
              // If this is debug info use & there is a reaching DOWN def
              if ((debug_start <= inpidx) && (debug_defs[slidx] != NULL)) {
                assert(inpidx < oopoff, "handle only debug info here");
                // Just hook it in & move on
                n->set_req(inpidx, debug_defs[slidx]);
                // (Note that this can make two sides of a split live at the
                // same time: The debug def on stack, and another def in a
                // register.  The GC needs to know about both of them, but any
                // derived pointers after oopoff will refer to only one of the
                // two defs and the GC would therefore miss the other.  Thus
                // this hack is only allowed for debug info which is Java state
                // and therefore never a derived pointer.)
                continue;
              }
            }
            // Grab register mask info
            const RegMask &dmask = def->out_RegMask();
            const RegMask &umask = n->in_RegMask(inpidx);
            bool is_vect = RegMask::is_vector(def->ideal_reg());
            assert(inpidx < oopoff, "cannot use-split oop map info");

            bool dup = UPblock[slidx];
            bool uup = umask.is_UP();

            // Need special logic to handle bound USES.  Insert a split at this
            // bound use if we can't rematerialize the def, or if we need the
            // split to form a misaligned pair.
            if( !umask.is_AllStack() &&
                (int)umask.Size() <= lrgs(useidx).num_regs() &&
                (!def->rematerialize() ||
                 !is_vect && umask.is_misaligned_pair())) {
              // These need a Split regardless of overlap or pressure
              // SPLIT - NO DEF - NO CISC SPILL
              maxlrg = split_USE(def,b,n,inpidx,maxlrg,dup,false, splits,slidx);
              // If it wasn't split bail
              if (!maxlrg) {
                return 0;
              }
              insidx++;  // Reset iterator to skip USE side split
              continue;
            }

            if (UseFPUForSpilling && n->is_MachCall() && !uup && !dup ) {
              // The use at the call can force the def down so insert
              // a split before the use to allow the def more freedom.
              maxlrg = split_USE(def,b,n,inpidx,maxlrg,dup,false, splits,slidx);
              // If it wasn't split bail
              if (!maxlrg) {
                return 0;
              }
              insidx++;  // Reset iterator to skip USE side split
              continue;
            }

            // Here is the logic chart which describes USE Splitting:
            // 0 = false or DOWN, 1 = true or UP
            //
            // Overlap | DEF | USE | Action
            //-------------------------------------------------------
            //    0    |  0  |  0  | Copy - mem -> mem
            //    0    |  0  |  1  | Split-UP - Check HRP
            //    0    |  1  |  0  | Split-DOWN - Debug Info?
            //    0    |  1  |  1  | Copy - reg -> reg
            //    1    |  0  |  0  | Reset Input Edge (no Split)
            //    1    |  0  |  1  | Split-UP - Check HRP
            //    1    |  1  |  0  | Split-DOWN - Debug Info?
            //    1    |  1  |  1  | Reset Input Edge (no Split)
            //
            // So, if (dup == uup), then overlap test determines action,
            // with true being no split, and false being copy.  Else,
            // if DEF is DOWN, Split-UP, and check HRP to decide on
            // resetting DEF.  Finally if DEF is UP, Split-DOWN, with
            // special handling for Debug Info.
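            // Reading the chart (illustrative): dup=1, uup=0 is rows 3
            // and 7 - regardless of overlap, the UP def is split DOWN for
            // this use, possibly feeding later debug uses; dup=uup=1 with
            // overlap=1 is row 8 - the input edge is simply reset to
            // 'def' and no new copy is made.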
            if( dup == uup ) {
              if( dmask.overlap(umask) ) {
                // Both are either up or down, and there is overlap, No Split
                n->set_req(inpidx, def);
              }
              else {  // Both are either up or down, and there is no overlap
                if( dup ) {  // If UP, reg->reg copy
                  // COPY ACROSS HERE - NO DEF - NO CISC SPILL
                  maxlrg = split_USE(def,b,n,inpidx,maxlrg,false,false, splits,slidx);
                  // If it wasn't split bail
                  if (!maxlrg) {
                    return 0;
                  }
                  insidx++;  // Reset iterator to skip USE side split
                }
                else {       // DOWN, mem->mem copy
                  // COPY UP & DOWN HERE - NO DEF - NO CISC SPILL
                  // First Split-UP to move value into Register
                  uint def_ideal = def->ideal_reg();
                  const RegMask* tmp_rm = Matcher::idealreg2regmask[def_ideal];
                  Node *spill = new (C) MachSpillCopyNode(def, dmask, *tmp_rm);
                  insert_proj( b, insidx, spill, maxlrg );
                  // Then Split-DOWN as if previous Split was DEF
                  maxlrg = split_USE(spill,b,n,inpidx,maxlrg,false,false, splits,slidx);
                  // If it wasn't split bail
                  if (!maxlrg) {
                    return 0;
                  }
                  insidx += 2;  // Reset iterator to skip USE side splits
                }
              }  // End else no overlap
            }  // End if dup == uup
            // dup != uup, so check dup for direction of Split
            else {
              if( dup ) {  // If UP, Split-DOWN and check Debug Info
                // If this node is already a SpillCopy, just patch the edge
                // except the case of spilling to stack.
                if( n->is_SpillCopy() ) {
                  RegMask tmp_rm(umask);
                  tmp_rm.SUBTRACT(Matcher::STACK_ONLY_mask);
                  if( dmask.overlap(tmp_rm) ) {
                    if( def != n->in(inpidx) ) {
                      n->set_req(inpidx, def);
                    }
                    continue;
                  }
                }
                // COPY DOWN HERE - NO DEF - NO CISC SPILL
                maxlrg = split_USE(def,b,n,inpidx,maxlrg,false,false, splits,slidx);
                // If it wasn't split bail
                if (!maxlrg) {
                  return 0;
                }
                insidx++;  // Reset iterator to skip USE side split
                // Check for debug-info split.  Capture it for later
                // debug splits of the same value
                if (jvms && jvms->debug_start() <= inpidx && inpidx < oopoff)
                  debug_defs[slidx] = n->in(inpidx);

              }
              else {       // DOWN, Split-UP and check register pressure
                if( is_high_pressure( b, &lrgs(useidx), insidx ) ) {
                  // COPY UP HERE - NO DEF - CISC SPILL
                  maxlrg = split_USE(def,b,n,inpidx,maxlrg,true,true, splits,slidx);
                  // If it wasn't split bail
                  if (!maxlrg) {
                    return 0;
                  }
                  insidx++;  // Reset iterator to skip USE side split
                } else {     // LRP
                  // COPY UP HERE - WITH DEF - NO CISC SPILL
                  maxlrg = split_USE(def,b,n,inpidx,maxlrg,true,false, splits,slidx);
                  // If it wasn't split bail
                  if (!maxlrg) {
                    return 0;
                  }
                  // Flag this lift-up in a low-pressure block as
                  // already-spilled, so if it spills again it will
                  // spill hard (instead of not spilling hard and
                  // coalescing away).
                  set_was_spilled(n->in(inpidx));
                  // Since this is a new DEF, update Reachblock & UP
                  Reachblock[slidx] = n->in(inpidx);
                  UPblock[slidx] = true;
                  insidx++;  // Reset iterator to skip USE side split
                }
              }  // End else DOWN
            }  // End dup != uup
          }  // End if Spill USE
        }  // End For All Inputs
      }  // End If not nullcheck

      // ********** Handle DEFS **********
      // DEFS either Split DOWN in HRP regions or when the LRG is bound, or
      // just reset the Reaches info in LRP regions.  DEFS must always update
      // UP info.
      if( deflrg.reg() >= LRG::SPILL_REG ) {    // Spilled?
        uint slidx = lrg2reach[defidx];
        // Add to defs list for later assignment of new live range number
        defs->push(n);
        // Set a flag on the Node indicating it has already spilled.
        // Only do it for capacity spills not conflict spills.
        if( !deflrg._direct_conflict )
          set_was_spilled(n);
        assert(!n->is_Phi(),"Cannot insert Phi into DEFS list");
        // Grab UP info for DEF
        const RegMask &dmask = n->out_RegMask();
        bool defup = dmask.is_UP();
        int ireg = n->ideal_reg();
        bool is_vect = RegMask::is_vector(ireg);
        // Only split at Def if this is a HRP block or bound (and spilled once)
        if( !n->rematerialize() &&
            (((dmask.is_bound(ireg) || !is_vect && dmask.is_misaligned_pair()) &&
              (deflrg._direct_conflict || deflrg._must_spill)) ||
             // Check for LRG being up in a register and we are inside a high
             // pressure area.  Spill it down immediately.
             (defup && is_high_pressure(b,&deflrg,insidx))) ) {
          assert( !n->rematerialize(), "" );
          assert( !n->is_SpillCopy(), "" );
          // Do a split at the def site.
          maxlrg = split_DEF( n, b, insidx, maxlrg, Reachblock, debug_defs, splits, slidx );
          // If it wasn't split bail
          if (!maxlrg) {
            return 0;
          }
          // Split DEF's Down
          UPblock[slidx] = 0;
#ifndef PRODUCT
          // DEBUG
          if( trace_spilling() ) {
            tty->print("\nNew Split DOWN DEF of Spill Idx ");
            tty->print("%d, UP %d:\n",slidx,false);
            n->dump();
          }
#endif
        }
        else {                  // Neither bound nor HRP, must be LRP
          // otherwise, just record the def
          Reachblock[slidx] = n;
          // UP should come from the outRegmask() of the DEF
          UPblock[slidx] = defup;
          // Update debug list of reaching down definitions, kill if DEF is UP
          debug_defs[slidx] = defup ? NULL : n;
#ifndef PRODUCT
          // DEBUG
          if( trace_spilling() ) {
            tty->print("\nNew DEF of Spill Idx ");
            tty->print("%d, UP %d:\n",slidx,defup);
            n->dump();
          }
#endif
        }  // End else LRP
      }  // End if spill def

      // ********** Split Left Over Mem-Mem Moves **********
      // Check for mem-mem copies and split them now.  Do not do this
      // to copies about to be spilled; they will be Split shortly.
      if (copyidx) {
        Node *use = n->in(copyidx);
        uint useidx = _lrg_map.find_id(use);
        if (useidx < _lrg_map.max_lrg_id() &&   // This is not a new split
            OptoReg::is_stack(deflrg.reg()) &&
            deflrg.reg() < LRG::SPILL_REG ) {   // And DEF is from stack
          LRG &uselrg = lrgs(useidx);
          if( OptoReg::is_stack(uselrg.reg()) &&
              uselrg.reg() < LRG::SPILL_REG &&  // USE is from stack
              deflrg.reg() != uselrg.reg() ) {  // Not trivially removed
            uint def_ideal_reg = n->bottom_type()->ideal_reg();
            const RegMask &def_rm = *Matcher::idealreg2regmask[def_ideal_reg];
            const RegMask &use_rm = n->in_RegMask(copyidx);
            if( def_rm.overlap(use_rm) && n->is_SpillCopy() ) {  // Bug 4707800, 'n' may be a storeSSL
              if (C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {  // Check when generating nodes
                return 0;
              }
              Node *spill = new (C) MachSpillCopyNode(use,use_rm,def_rm);
              n->set_req(copyidx,spill);
              n->as_MachSpillCopy()->set_in_RegMask(def_rm);
              // Put the spill just before the copy
              insert_proj( b, insidx++, spill, maxlrg++ );
            }
          }
        }
      }
    }  // End For All Instructions in Block - Non-PHI Pass

    // Check if each LRG is live out of this block so as not to propagate
    // beyond the last use of a LRG.
    for( slidx = 0; slidx < spill_cnt; slidx++ ) {
      uint defidx = lidxs.at(slidx);
      IndexSet *liveout = _live->live(b);
      if( !liveout->member(defidx) ) {
#ifdef ASSERT
        // The index defidx is not live.  Check the liveout array to ensure that
        // it contains no members which compress to defidx.  Finding such an
        // instance may be a case to add liveout adjustment in compress_uf_map().
        // See 5063219.
        uint member;
        IndexSetIterator isi(liveout);
        while ((member = isi.next()) != 0) {
          assert(defidx != _lrg_map.find_const(member), "Live out member has not been compressed");
        }
#endif
        Reachblock[slidx] = NULL;
      } else {
        assert(Reachblock[slidx] != NULL,"No reaching definition for liveout value");
      }
    }
#ifndef PRODUCT
    if( trace_spilling() )
      b->dump();
#endif
  }  // End For All Blocks

  //----------PASS 2----------
  // Reset all DEF live range numbers here
  for( insidx = 0; insidx < defs->size(); insidx++ ) {
    // Grab the def
    n1 = defs->at(insidx);
    // Set new lidx for DEF
    new_lrg(n1, maxlrg++);
  }
  //----------Phi Node Splitting----------
  // Clean up a phi here, and assign a new live range number
  // Cycle through this block's predecessors, collecting Reaches
  // info for each spilled LRG and update edges.
  // Walk the phis list to patch inputs, split phis, and name phis
  uint lrgs_before_phi_split = maxlrg;
  for( insidx = 0; insidx < phis->size(); insidx++ ) {
    Node *phi = phis->at(insidx);
    assert(phi->is_Phi(),"This list must only contain Phi Nodes");
    Block *b = _cfg.get_block_for_node(phi);
    // Grab the live range number
    uint lidx = _lrg_map.find_id(phi);
    uint slidx = lrg2reach[lidx];
    // Update node to lidx map
    new_lrg(phi, maxlrg++);
    // Get PASS1's up/down decision for the block.
    int phi_up = !!UP_entry[slidx]->test(b->_pre_order);

    // Force down if double-spilling live range
    if( lrgs(lidx)._was_spilled1 )
      phi_up = false;

    // When splitting a Phi we can split it normally or "inverted".
    // An inverted split makes the splits target the Phi's UP/DOWN
    // sense inverted; then the Phi is followed by a final def-side
    // split to invert back.  It changes which blocks the spill code
    // goes in.

    // Walk the predecessor blocks and assign the reaching def to the Phi.
    // Split Phi nodes by placing USE side splits wherever the reaching
    // DEF has the wrong UP/DOWN value.
    for( uint i = 1; i < b->num_preds(); i++ ) {
      // Get predecessor block pre-order number
      Block *pred = _cfg.get_block_for_node(b->pred(i));
      pidx = pred->_pre_order;
      // Grab reaching def
      Node *def = Reaches[pidx][slidx];
      assert( def, "must have reaching def" );
      // If input up/down sense and reg-pressure DISagree
      if (def->rematerialize() && contains_no_live_range_input(def)) {
        // Place the rematerialized node above any MSCs created during
        // phi node splitting.  end_idx points at the insertion point
        // so look at the node before it.
        int insert = pred->end_idx();
        while (insert >= 1 &&
               pred->get_node(insert - 1)->is_SpillCopy() &&
               _lrg_map.find(pred->get_node(insert - 1)) >= lrgs_before_phi_split) {
          insert--;
        }
        // since the def cannot contain any live range input, we can pass in NULL as Reachblock parameter
        def = split_Rematerialize(def, pred, insert, maxlrg, splits, slidx, lrg2reach, NULL, false);
        if (!def) {
          return 0;    // Bail out
        }
      }
      // Update the Phi's input edge array
      phi->set_req(i,def);
      // Grab the UP/DOWN sense for the input
      u1 = UP[pidx][slidx];
      if( u1 != (phi_up != 0)) {
        maxlrg = split_USE(def, b, phi, i, maxlrg, !u1, false, splits,slidx);
        // If it wasn't split bail
        if (!maxlrg) {
          return 0;
        }
      }
    }  // End for all inputs to the Phi
  }  // End for all Phi Nodes
  // Update _maxlrg to save Union asserts
  _lrg_map.set_max_lrg_id(maxlrg);


  //----------PASS 3----------
  // Pass over all Phi's to union the live ranges
  for( insidx = 0; insidx < phis->size(); insidx++ ) {
    Node *phi = phis->at(insidx);
    assert(phi->is_Phi(),"This list must only contain Phi Nodes");
    // Walk all inputs to Phi and Union input live range with Phi live range
    for( uint i = 1; i < phi->req(); i++ ) {
      // Grab the input node
      Node *n = phi->in(i);
      assert(n, "node should exist");
      uint lidx = _lrg_map.find(n);
      uint pidx = _lrg_map.find(phi);
      if (lidx < pidx) {
        Union(n, phi);
      }
      else if (lidx > pidx) {
        Union(phi, n);
      }
    }  // End for all inputs to the Phi Node
  }  // End for all Phi Nodes
  // Now union all two address instructions
  for (insidx = 0; insidx < defs->size(); insidx++) {
    // Grab the def
    n1 = defs->at(insidx);
    // Set new lidx for DEF & handle 2-addr instructions
    if (n1->is_Mach() && ((twoidx = n1->as_Mach()->two_adr()) != 0)) {
      assert(_lrg_map.find(n1->in(twoidx)) < maxlrg,"Assigning bad live range index");
      // Union the input and output live ranges
      uint lr1 = _lrg_map.find(n1);
      uint lr2 = _lrg_map.find(n1->in(twoidx));
      if (lr1 < lr2) {
        Union(n1, n1->in(twoidx));
      }
      else if (lr1 > lr2) {
        Union(n1->in(twoidx), n1);
      }
    }  // End if two address
  }  // End for all defs
  // DEBUG
#ifdef ASSERT
  // Validate all live range index assignments
  for (bidx = 0; bidx < _cfg.number_of_blocks(); bidx++) {
    b = _cfg.get_block(bidx);
    for (insidx = 0; insidx <= b->end_idx(); insidx++) {
      Node *n = b->get_node(insidx);
      uint defidx = _lrg_map.find(n);
      assert(defidx < _lrg_map.max_lrg_id(), "Bad live range index in Split");
      assert(defidx < maxlrg,"Bad live range index in Split");
    }
  }
  // Issue a warning if splitting made no progress
  int noprogress = 0;
  for (slidx = 0; slidx < spill_cnt; slidx++) {
    if (PrintOpto && WizardMode && splits.at(slidx) == 0) {
      tty->print_cr("Failed to split live range %d", lidxs.at(slidx));
      //BREAKPOINT;
    }
    else {
      noprogress++;
    }
  }
  if(!noprogress) {
    tty->print_cr("Failed to make progress in Split");
    //BREAKPOINT;
  }
#endif
  // Return updated count of live ranges
  return maxlrg;
}