/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/oopMap.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/block.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/chaitin.hpp"
#include "opto/coalesce.hpp"
#include "opto/connode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/indexSet.hpp"
#include "opto/machnode.hpp"
#include "opto/memnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/rootnode.hpp"

//=============================================================================

#ifndef PRODUCT
void LRG::dump( ) const {
  ttyLocker ttyl;
  tty->print("%d ",num_regs());
  _mask.dump();
  if( _msize_valid ) {
    if( mask_size() == compute_mask_size() ) tty->print(", #%d ",_mask_size);
    else tty->print(", #!!!_%d_vs_%d ",_mask_size,_mask.Size());
  } else {
    tty->print(", #?(%d) ",_mask.Size());
  }

  tty->print("EffDeg: ");
  if( _degree_valid ) tty->print( "%d ", _eff_degree );
  else tty->print("? ");

  if( is_multidef() ) {
    tty->print("MultiDef ");
    if (_defs != NULL) {
      tty->print("(");
      for (int i = 0; i < _defs->length(); i++) {
        tty->print("N%d ", _defs->at(i)->_idx);
      }
      tty->print(") ");
    }
  }
  else if( _def == 0 ) tty->print("Dead ");
  else tty->print("Def: N%d ",_def->_idx);

  tty->print("Cost:%4.2g Area:%4.2g Score:%4.2g ",_cost,_area, score());
  // Flags
  if( _is_oop ) tty->print("Oop ");
  if( _is_float ) tty->print("Float ");
  if( _is_vector ) tty->print("Vector ");
  if( _was_spilled1 ) tty->print("Spilled ");
  if( _was_spilled2 ) tty->print("Spilled2 ");
  if( _direct_conflict ) tty->print("Direct_conflict ");
  if( _fat_proj ) tty->print("Fat ");
  if( _was_lo ) tty->print("Lo ");
  if( _has_copy ) tty->print("Copy ");
  if( _at_risk ) tty->print("Risk ");

  if( _must_spill ) tty->print("Must_spill ");
  if( _is_bound ) tty->print("Bound ");
  if( _msize_valid ) {
    if( _degree_valid && lo_degree() ) tty->print("Trivial ");
  }

  tty->cr();
}
#endif

//------------------------------score------------------------------------------
// Compute score from cost and area. Low score is best to spill.
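// Illustrative arithmetic (the constant 1.52588e-5 below is 1/65536): with
// cost=10.0, area=65536.0 and RegisterCostAreaRatio=1000, the area term is
// 65536*1000/65536 = 1000, so raw_score returns 10 - 1000 = -990.  Larger
// areas push the score down, marking the live range as a better spill choice.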
static double raw_score( double cost, double area ) {
  return cost - (area*RegisterCostAreaRatio) * 1.52588e-5;
}

double LRG::score() const {
  // Scale _area by RegisterCostAreaRatio/64K then subtract from cost.
  // Bigger area lowers score, encourages spilling this live range.
  // Bigger cost raises score, prevents spilling this live range.
  // (Note: 1/65536 is the magic constant below; I don't trust the C optimizer
  // to turn a divide by a constant into a multiply by the reciprocal).
  double score = raw_score( _cost, _area);

  // Account for area.  Basically, LRGs covering large areas are better
  // to spill because more other LRGs get freed up.
  if( _area == 0.0 )            // No area?  Then no progress to spill
    return 1e35;

  if( _was_spilled2 )           // If spilled once before, we are unlikely
    return score + 1e30;        // to make progress again.

  if( _cost >= _area*3.0 )      // Tiny area relative to cost
    return score + 1e17;        // Probably no progress to spill

  if( (_cost+_cost) >= _area*3.0 ) // Small area relative to cost
    return score + 1e10;        // Likely no progress to spill

  return score;
}

//------------------------------LRG_List---------------------------------------
LRG_List::LRG_List( uint max ) : _cnt(max), _max(max), _lidxs(NEW_RESOURCE_ARRAY(uint,max)) {
  memset( _lidxs, 0, sizeof(uint)*max );
}

void LRG_List::extend( uint nidx, uint lidx ) {
  _nesting.check();
  if( nidx >= _max ) {
    uint size = 16;
    while( size <= nidx ) size <<=1;
    _lidxs = REALLOC_RESOURCE_ARRAY( uint, _lidxs, _max, size );
    _max = size;
  }
  while( _cnt <= nidx )
    _lidxs[_cnt++] = 0;
  _lidxs[nidx] = lidx;
}

#define NUMBUCKS 3

// Straight out of Tarjan's union-find algorithm
uint LiveRangeMap::find_compress(uint lrg) {
  uint cur = lrg;
  uint next = _uf_map[cur];
  while (next != cur) { // Scan chain of equivalences
    assert( next < cur, "always union smaller");
    cur = next; // until find a fixed-point
    next = _uf_map[cur];
  }

  // Core of union-find algorithm: update chain of
  // equivalences to be equal to the root.
  while (lrg != next) {
    uint tmp = _uf_map[lrg];
    _uf_map.map(lrg, next);
    lrg = tmp;
  }
  return lrg;
}

// Reset the Union-Find map to identity
void LiveRangeMap::reset_uf_map(uint max_lrg_id) {
  _max_lrg_id= max_lrg_id;
  // Force the Union-Find mapping to be at least this large
  _uf_map.extend(_max_lrg_id, 0);
  // Initialize it to be the ID mapping.
  for (uint i = 0; i < _max_lrg_id; ++i) {
    _uf_map.map(i, i);
  }
}

// Make all Nodes map directly to their final live range; no need for
// the Union-Find mapping after this call.
void LiveRangeMap::compress_uf_map_for_nodes() {
  // For all Nodes, compress mapping
  uint unique = _names.Size();
  for (uint i = 0; i < unique; ++i) {
    uint lrg = _names[i];
    uint compressed_lrg = find(lrg);
    if (lrg != compressed_lrg) {
      _names.map(i, compressed_lrg);
    }
  }
}

// Like Find above, but no path compress, so bad asymptotic behavior
uint LiveRangeMap::find_const(uint lrg) const {
  if (!lrg) {
    return lrg; // Ignore the zero LRG
  }

  // Off the end?  This happens during debugging dumps when you have
  // brand new live ranges but have not told the allocator yet.
  if (lrg >= _max_lrg_id) {
    return lrg;
  }

  uint next = _uf_map[lrg];
  while (next != lrg) { // Scan chain of equivalences
    assert(next < lrg, "always union smaller");
    lrg = next; // until find a fixed-point
    next = _uf_map[lrg];
  }
  return next;
}

//------------------------------Chaitin----------------------------------------
PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher)
  : PhaseRegAlloc(unique, cfg, matcher,
#ifndef PRODUCT
       print_chaitin_statistics
#else
       NULL
#endif
       )
  , _lrg_map(unique)
  , _live(0)
  , _spilled_once(Thread::current()->resource_area())
  , _spilled_twice(Thread::current()->resource_area())
  , _lo_degree(0), _lo_stk_degree(0), _hi_degree(0), _simplified(0)
  , _oldphi(unique)
#ifndef PRODUCT
  , _trace_spilling(TraceSpilling || C->method_has_option("TraceSpilling"))
#endif
{
  NOT_PRODUCT( Compile::TracePhase t3("ctorChaitin", &_t_ctorChaitin, TimeCompiler); )

  _high_frequency_lrg = MIN2(float(OPTO_LRG_HIGH_FREQ), _cfg._outer_loop_freq);

  // Build a list of basic blocks, sorted by frequency
  _blks = NEW_RESOURCE_ARRAY( Block *, _cfg._num_blocks );
  // Experiment with sorting strategies to speed compilation
  double cutoff = BLOCK_FREQUENCY(1.0); // Cutoff for high frequency bucket
  Block **buckets[NUMBUCKS];            // Array of buckets
  uint    buckcnt[NUMBUCKS];            // Array of bucket counters
  double  buckval[NUMBUCKS];            // Array of bucket value cutoffs
  for (uint i = 0; i < NUMBUCKS; i++) {
    buckets[i] = NEW_RESOURCE_ARRAY(Block *, _cfg._num_blocks);
    buckcnt[i] = 0;
    // Bump by three orders of magnitude each time
    cutoff *= 0.001;
    buckval[i] = cutoff;
    for (uint j = 0; j < _cfg._num_blocks; j++) {
      buckets[i][j] = NULL;
    }
  }
  // Sort blocks into buckets
  for (uint i = 0; i < _cfg._num_blocks; i++) {
    for (uint j = 0; j < NUMBUCKS; j++) {
      if ((j == NUMBUCKS - 1) || (_cfg._blocks[i]->_freq > buckval[j])) {
        // Assign block to end of list for appropriate bucket
        buckets[j][buckcnt[j]++] = _cfg._blocks[i];
        break; // kick out of inner loop
      }
    }
  }
  // Dump buckets into final block array
  uint blkcnt = 0;
  for (uint i = 0; i < NUMBUCKS; i++) {
    for (uint j = 0; j < buckcnt[i]; j++) {
      _blks[blkcnt++] = buckets[i][j];
    }
  }

  assert(blkcnt == _cfg._num_blocks, "Block array not totally filled");
}

//------------------------------Union------------------------------------------
// union 2 sets together.
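// Because Union() always maps the larger live range index onto the smaller
// one (see the "always union smaller" asserts above), every chain in _uf_map
// strictly decreases.  For example, with _uf_map = {5->3, 3->1, 1->1},
// find_compress(5) walks 5->3->1 and then rewrites both 5 and 3 to point at 1.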
void PhaseChaitin::Union( const Node *src_n, const Node *dst_n ) {
  uint src = _lrg_map.find(src_n);
  uint dst = _lrg_map.find(dst_n);
  assert(src, "");
  assert(dst, "");
  assert(src < _lrg_map.max_lrg_id(), "oob");
  assert(dst < _lrg_map.max_lrg_id(), "oob");
  assert(src < dst, "always union smaller");
  _lrg_map.uf_map(dst, src);
}

//------------------------------new_lrg----------------------------------------
void PhaseChaitin::new_lrg(const Node *x, uint lrg) {
  // Make the Node->LRG mapping
  _lrg_map.extend(x->_idx,lrg);
  // Make the Union-Find mapping an identity function
  _lrg_map.uf_extend(lrg, lrg);
}


int PhaseChaitin::clone_projs(Block *b, uint idx, Node *orig, Node *copy, uint &max_lrg_id) {
  assert(b->find_node(copy) == (idx - 1), "incorrect insert index for copy kill projections");
  Block* borig = _cfg._bbs[orig->_idx];
  uint bindex = borig->find_node(orig) + 1;
  Node* proj = borig->_nodes[bindex++];
  int found_projs = 0;
  while (proj->in(0) == orig && proj->is_MachProj()) {
    found_projs++;
    // Copy kill projections after the cloned node
    Node* kills = proj->clone();
    kills->set_req(0, copy);
    b->_nodes.insert(idx++, kills);
    _cfg._bbs.map(kills->_idx, b);
    new_lrg(kills, max_lrg_id++);
    proj = borig->_nodes[bindex++];
  }
  return found_projs;
}

//------------------------------compact----------------------------------------
// Renumber the live ranges to compact them.  Makes the IFG smaller.
void PhaseChaitin::compact() {
  // Currently the _uf_map contains a series of short chains which are headed
  // by a self-cycle.  All the chains run from big numbers to little numbers.
  // The Find() call chases the chains & shortens them for the next Find call.
  // We are going to change this structure slightly.  Numbers above a moving
  // wave 'i' are unchanged.  Numbers below 'j' point directly to their
  // compacted live range with no further chaining.  There are no chains or
  // cycles below 'i', so the Find call no longer works.
  uint j=1;
  uint i;
  for (i = 1; i < _lrg_map.max_lrg_id(); i++) {
    uint lr = _lrg_map.uf_live_range_id(i);
    // Ignore unallocated live ranges
    if (!lr) {
      continue;
    }
    assert(lr <= i, "");
    _lrg_map.uf_map(i, ( lr == i ) ? j++ : _lrg_map.uf_live_range_id(lr));
  }
  // Now change the Node->LR mapping to reflect the compacted names
  uint unique = _lrg_map.size();
  for (i = 0; i < unique; i++) {
    uint lrg_id = _lrg_map.live_range_id(i);
    _lrg_map.map(i, _lrg_map.uf_live_range_id(lrg_id));
  }

  // Reset the Union-Find mapping
  _lrg_map.reset_uf_map(j);
}

void PhaseChaitin::Register_Allocate() {

  // Above the OLD FP (and in registers) are the incoming arguments.  Stack
  // slots in this area are called "arg_slots".  Above the NEW FP (and in
  // registers) is the outgoing argument area; above that is the spill/temp
  // area.  These are all "frame_slots".  Arg_slots start at the zero
  // stack_slots and count up to the known arg_size.  Frame_slots start at
  // the stack_slot #arg_size and go up.  After allocation I map stack
  // slots to actual offsets.  Stack-slots in the arg_slot area are biased
  // by the frame_size; stack-slots in the frame_slot area are biased by 0.
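  // For example, with an arg_size of 4 the incoming arguments occupy
  // arg_slots 0..3 and frame_slots start at stack_slot 4; when slots are
  // later mapped to offsets, an arg_slot is biased by the frame size while
  // a frame_slot keeps its own index.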
  _trip_cnt = 0;
  _alternate = 0;
  _matcher._allocation_started = true;

  ResourceArea split_arena;     // Arena for Split local resources
  ResourceArea live_arena;      // Arena for liveness & IFG info
  ResourceMark rm(&live_arena);

  // Need live-ness for the IFG; need the IFG for coalescing.  If the
  // liveness is JUST for coalescing, then I can get some mileage by renaming
  // all copy-related live ranges low and then using the max copy-related
  // live range as a cut-off for LIVE and the IFG.  In other words, I can
  // build a subset of LIVE and IFG just for copies.
  PhaseLive live(_cfg, _lrg_map.names(), &live_arena);

  // Need IFG for coalescing and coloring
  PhaseIFG ifg(&live_arena);
  _ifg = &ifg;

  // Come out of SSA world to the Named world.  Assign (virtual) registers to
  // Nodes.  Use the same register for all inputs and the output of PhiNodes
  // - effectively ending SSA form.  This requires either coalescing live
  // ranges or inserting copies.  For the moment, we insert "virtual copies"
  // - we pretend there is a copy prior to each Phi in predecessor blocks.
  // We will attempt to coalesce such "virtual copies" before we manifest
  // them for real.
  de_ssa();

#ifdef ASSERT
  // Verify the graph before RA.
  verify(&live_arena);
#endif

  {
    NOT_PRODUCT( Compile::TracePhase t3("computeLive", &_t_computeLive, TimeCompiler); )
    _live = NULL;                 // Mark live as being not available
    rm.reset_to_mark();           // Reclaim working storage
    IndexSet::reset_memory(C, &live_arena);
    ifg.init(_lrg_map.max_lrg_id()); // Empty IFG
    gather_lrg_masks( false );    // Collect LRG masks
    live.compute(_lrg_map.max_lrg_id()); // Compute liveness
    _live = &live;                // Mark LIVE as being available
  }

  // Base pointers are currently "used" by instructions which define new
  // derived pointers.  This makes base pointers live up to where the
  // derived pointer is made, but not beyond.  Really, they need to be live
  // across any GC point where the derived value is live.  So this code looks
  // at all the GC points, and "stretches" the live range of any base pointer
  // to the GC point.
  if (stretch_base_pointer_live_ranges(&live_arena)) {
    NOT_PRODUCT(Compile::TracePhase t3("computeLive (sbplr)", &_t_computeLive, TimeCompiler);)
    // Since some live range stretched, I need to recompute live
    _live = NULL;
    rm.reset_to_mark();         // Reclaim working storage
    IndexSet::reset_memory(C, &live_arena);
    ifg.init(_lrg_map.max_lrg_id());
    gather_lrg_masks(false);
    live.compute(_lrg_map.max_lrg_id());
    _live = &live;
  }
  // Create the interference graph using virtual copies
  build_ifg_virtual();  // Include stack slots this time

  // Aggressive (but pessimistic) copy coalescing.
  // This pass works on virtual copies.  Any virtual copies which are not
  // coalesced get manifested as actual copies
  {
    // The IFG is/was triangular.  I am 'squaring it up' so Union can run
    // faster.  Union requires a 'for all' operation which is slow on the
    // triangular adjacency matrix (quick reminder: the IFG is 'sparse' -
    // meaning I can visit all the Nodes neighbors less than a Node in time
    // O(# of neighbors), but I have to visit all the Nodes greater than a
    // given Node and search them for an instance, i.e., time O(#MaxLRG)).
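    // In the triangular form a live range only lists neighbors with smaller
    // indices, so enumerating all neighbors of L requires scanning every
    // larger live range.  SquareUp() adds the symmetric entries so that
    // neighbors(L) can be walked in time proportional to L's degree.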
    _ifg->SquareUp();

    PhaseAggressiveCoalesce coalesce(*this);
    coalesce.coalesce_driver();
    // Insert un-coalesced copies.  Visit all Phis.  Where inputs to a Phi do
    // not match the Phi itself, insert a copy.
    coalesce.insert_copies(_matcher);
    if (C->failing()) {
      return;
    }
  }

  // After aggressive coalesce, attempt a first cut at coloring.
  // To color, we need the IFG and for that we need LIVE.
  {
    NOT_PRODUCT( Compile::TracePhase t3("computeLive", &_t_computeLive, TimeCompiler); )
    _live = NULL;
    rm.reset_to_mark();           // Reclaim working storage
    IndexSet::reset_memory(C, &live_arena);
    ifg.init(_lrg_map.max_lrg_id());
    gather_lrg_masks( true );
    live.compute(_lrg_map.max_lrg_id());
    _live = &live;
  }

  // Build physical interference graph
  uint must_spill = 0;
  must_spill = build_ifg_physical(&live_arena);
  // If we have a guaranteed spill, might as well spill now
  if (must_spill) {
    if(!_lrg_map.max_lrg_id()) {
      return;
    }
    // Bail out if unique gets too large (ie - unique > MaxNodeLimit)
    C->check_node_count(10*must_spill, "out of nodes before split");
    if (C->failing()) {
      return;
    }

    uint new_max_lrg_id = Split(_lrg_map.max_lrg_id(), &split_arena);  // Split spilling LRG everywhere
    _lrg_map.set_max_lrg_id(new_max_lrg_id);
    // Bail out if unique gets too large (ie - unique > MaxNodeLimit - 2*NodeLimitFudgeFactor)
    // or we failed to split
    C->check_node_count(2*NodeLimitFudgeFactor, "out of nodes after physical split");
    if (C->failing()) {
      return;
    }

    NOT_PRODUCT(C->verify_graph_edges();)

    compact();                  // Compact LRGs; return new lower max lrg

    {
      NOT_PRODUCT( Compile::TracePhase t3("computeLive", &_t_computeLive, TimeCompiler); )
      _live = NULL;
      rm.reset_to_mark();         // Reclaim working storage
      IndexSet::reset_memory(C, &live_arena);
      ifg.init(_lrg_map.max_lrg_id()); // Build a new interference graph
      gather_lrg_masks( true );   // Collect intersect mask
      live.compute(_lrg_map.max_lrg_id()); // Compute LIVE
      _live = &live;
    }
    build_ifg_physical(&live_arena);
    _ifg->SquareUp();
    _ifg->Compute_Effective_Degree();
    // Only do conservative coalescing if requested
    if (OptoCoalesce) {
      // Conservative (and pessimistic) copy coalescing of those spills
      PhaseConservativeCoalesce coalesce(*this);
      // If max live ranges greater than cutoff, don't color the stack.
      // This cutoff can be larger than below since it is only done once.
      coalesce.coalesce_driver();
    }
    _lrg_map.compress_uf_map_for_nodes();

#ifdef ASSERT
    verify(&live_arena, true);
#endif
  } else {
    ifg.SquareUp();
    ifg.Compute_Effective_Degree();
#ifdef ASSERT
    set_was_low();
#endif
  }

  // Prepare for Simplify & Select
  cache_lrg_info();           // Count degree of LRGs

  // Simplify the InterFerence Graph by removing LRGs of low degree.
  // LRGs of low degree are trivially colorable.
  Simplify();

  // Select colors by re-inserting LRGs back into the IFG in reverse order.
  // Return whether or not something spills.
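  // Select() returns the number of live ranges that failed to get a color;
  // any non-zero count drives the split-and-retry loop below, which is
  // bounded by _trip_cnt so a stubborn method bails out of compilation
  // rather than looping forever.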
  uint spills = Select( );

  // If we spill, split and recycle the entire thing
  while( spills ) {
    if( _trip_cnt++ > 24 ) {
      DEBUG_ONLY( dump_for_spill_split_recycle(); )
      if( _trip_cnt > 27 ) {
        C->record_method_not_compilable("failed spill-split-recycle sanity check");
        return;
      }
    }

    if (!_lrg_map.max_lrg_id()) {
      return;
    }
    uint new_max_lrg_id = Split(_lrg_map.max_lrg_id(), &split_arena);  // Split spilling LRG everywhere
    _lrg_map.set_max_lrg_id(new_max_lrg_id);
    // Bail out if unique gets too large (ie - unique > MaxNodeLimit - 2*NodeLimitFudgeFactor)
    C->check_node_count(2 * NodeLimitFudgeFactor, "out of nodes after split");
    if (C->failing()) {
      return;
    }

    compact();                  // Compact LRGs; return new lower max lrg

    // Nuke the live-ness and interference graph and LiveRanGe info
    {
      NOT_PRODUCT( Compile::TracePhase t3("computeLive", &_t_computeLive, TimeCompiler); )
      _live = NULL;
      rm.reset_to_mark();         // Reclaim working storage
      IndexSet::reset_memory(C, &live_arena);
      ifg.init(_lrg_map.max_lrg_id());

      // Create LiveRanGe array.
      // Intersect register masks for all USEs and DEFs
      gather_lrg_masks(true);
      live.compute(_lrg_map.max_lrg_id());
      _live = &live;
    }
    must_spill = build_ifg_physical(&live_arena);
    _ifg->SquareUp();
    _ifg->Compute_Effective_Degree();

    // Only do conservative coalescing if requested
    if (OptoCoalesce) {
      // Conservative (and pessimistic) copy coalescing
      PhaseConservativeCoalesce coalesce(*this);
      // Check for few live ranges determines how aggressive coalesce is.
      coalesce.coalesce_driver();
    }
    _lrg_map.compress_uf_map_for_nodes();
#ifdef ASSERT
    verify(&live_arena, true);
#endif
    cache_lrg_info();           // Count degree of LRGs

    // Simplify the InterFerence Graph by removing LRGs of low degree.
    // LRGs of low degree are trivially colorable.
    Simplify();

    // Select colors by re-inserting LRGs back into the IFG in reverse order.
    // Return whether or not something spills.
    spills = Select();
  }

  // Count number of Simplify-Select trips per coloring success.
  _allocator_attempts += _trip_cnt + 1;
  _allocator_successes += 1;

  // Peephole remove copies
  post_allocate_copy_removal();

#ifdef ASSERT
  // Verify the graph after RA.
  verify(&live_arena);
#endif

  // max_reg is past the largest *register* used.
  // Convert that to a frame_slot number.
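  // For example, if _new_SP names stack slot 8 and the highest allocated
  // name is stack slot 20, the frame needs 12 slots (before the alignment
  // rounding below); if no name lies above _new_SP, only the out-preserve
  // slots are required.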
  if (_max_reg <= _matcher._new_SP) {
    _framesize = C->out_preserve_stack_slots();
  }
  else {
    _framesize = _max_reg -_matcher._new_SP;
  }
  assert((int)(_matcher._new_SP+_framesize) >= (int)_matcher._out_arg_limit, "framesize must be large enough");

  // This frame must preserve the required fp alignment
  _framesize = round_to(_framesize, Matcher::stack_alignment_in_slots());
  assert( _framesize >= 0 && _framesize <= 1000000, "sanity check" );
#ifndef PRODUCT
  _total_framesize += _framesize;
  if ((int)_framesize > _max_framesize) {
    _max_framesize = _framesize;
  }
#endif

  // Convert CISC spills
  fixup_spills();

  // Log regalloc results
  CompileLog* log = Compile::current()->log();
  if (log != NULL) {
    log->elem("regalloc attempts='%d' success='%d'", _trip_cnt, !C->failing());
  }

  if (C->failing()) {
    return;
  }

  NOT_PRODUCT(C->verify_graph_edges();)

  // Move important info out of the live_arena to longer lasting storage.
  alloc_node_regs(_lrg_map.size());
  for (uint i=0; i < _lrg_map.size(); i++) {
    if (_lrg_map.live_range_id(i)) { // Live range associated with Node?
      LRG &lrg = lrgs(_lrg_map.live_range_id(i));
      if (!lrg.alive()) {
        set_bad(i);
      } else if (lrg.num_regs() == 1) {
        set1(i, lrg.reg());
      } else {                  // Must be a register-set
        if (!lrg._fat_proj) {   // Must be aligned adjacent register set
          // Live ranges record the highest register in their mask.
          // We want the low register for the AD file writer's convenience.
          OptoReg::Name hi = lrg.reg(); // Get hi register
          OptoReg::Name lo = OptoReg::add(hi, (1-lrg.num_regs())); // Find lo
          // We have to use pair [lo,lo+1] even for wide vectors because
          // the rest of code generation works only with pairs. It is safe
          // since for registers encoding only 'lo' is used.
          // Second reg from pair is used in ScheduleAndBundle on SPARC where
          // vector max size is 8 which corresponds to registers pair.
          // It is also used in BuildOopMaps but oop operations are not
          // vectorized.
          set2(i, lo);
        } else {                // Misaligned; extract 2 bits
          OptoReg::Name hi = lrg.reg(); // Get hi register
          lrg.Remove(hi);       // Yank from mask
          int lo = lrg.mask().find_first_elem(); // Find lo
          set_pair(i, hi, lo);
        }
      }
      if( lrg._is_oop ) _node_oops.set(i);
    } else {
      set_bad(i);
    }
  }

  // Done!
  _live = NULL;
  _ifg = NULL;
  C->set_indexSet_arena(NULL);  // ResourceArea is at end of scope
}

//------------------------------de_ssa-----------------------------------------
void PhaseChaitin::de_ssa() {
  // Set initial Names for all Nodes.  Most Nodes get the virtual register
  // number.  A few get the ZERO live range number.  These do not
  // get allocated, but instead rely on correct scheduling to ensure that
  // only one instance is simultaneously live at a time.
  uint lr_counter = 1;
  for( uint i = 0; i < _cfg._num_blocks; i++ ) {
    Block *b = _cfg._blocks[i];
    uint cnt = b->_nodes.size();

    // Handle all the normal Nodes in the block
    for( uint j = 0; j < cnt; j++ ) {
      Node *n = b->_nodes[j];
      // Pre-color to the zero live range, or pick virtual register
      const RegMask &rm = n->out_RegMask();
      _lrg_map.map(n->_idx, rm.is_NotEmpty() ?
                   lr_counter++ : 0);
    }
  }
  // Reset the Union-Find mapping to be identity
  _lrg_map.reset_uf_map(lr_counter);
}


//------------------------------gather_lrg_masks-------------------------------
// Gather LiveRanGe information, including register masks.  Modification of
// cisc spillable in_RegMasks should not be done before AggressiveCoalesce.
void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {

  // Nail down the frame pointer live range
  uint fp_lrg = _lrg_map.live_range_id(_cfg._root->in(1)->in(TypeFunc::FramePtr));
  lrgs(fp_lrg)._cost += 1e12;   // Cost is infinite

  // For all blocks
  for( uint i = 0; i < _cfg._num_blocks; i++ ) {
    Block *b = _cfg._blocks[i];

    // For all instructions
    for( uint j = 1; j < b->_nodes.size(); j++ ) {
      Node *n = b->_nodes[j];
      uint input_edge_start =1; // Skip control edge for most nodes
      if( n->is_Mach() ) input_edge_start = n->as_Mach()->oper_input_base();
      uint idx = n->is_Copy();

      // Get virtual register number, same as LiveRanGe index
      uint vreg = _lrg_map.live_range_id(n);
      LRG &lrg = lrgs(vreg);
      if( vreg ) {              // No vreg means un-allocable (e.g. memory)

        // Collect has-copy bit
        if( idx ) {
          lrg._has_copy = 1;
          uint clidx = _lrg_map.live_range_id(n->in(idx));
          LRG &copy_src = lrgs(clidx);
          copy_src._has_copy = 1;
        }

        // Check for float-vs-int live range (used in register-pressure
        // calculations)
        const Type *n_type = n->bottom_type();
        if (n_type->is_floatingpoint())
          lrg._is_float = 1;

        // Check for twice prior spilling.  Once prior spilling might have
        // spilled 'soft', 2nd prior spill should have spilled 'hard' and
        // further spilling is unlikely to make progress.
        if( _spilled_once.test(n->_idx) ) {
          lrg._was_spilled1 = 1;
          if( _spilled_twice.test(n->_idx) )
            lrg._was_spilled2 = 1;
        }

#ifndef PRODUCT
        if (trace_spilling() && lrg._def != NULL) {
          // collect defs for MultiDef printing
          if (lrg._defs == NULL) {
            lrg._defs = new (_ifg->_arena) GrowableArray<Node*>(_ifg->_arena, 2, 0, NULL);
            lrg._defs->append(lrg._def);
          }
          lrg._defs->append(n);
        }
#endif

        // Check for a single def LRG; these can spill nicely
        // via rematerialization.  Flag as NULL for no def found
        // yet, or 'n' for single def or -1 for many defs.
        lrg._def = lrg._def ? NodeSentinel : n;

        // Limit result register mask to acceptable registers
        const RegMask &rm = n->out_RegMask();
        lrg.AND( rm );

        int ireg = n->ideal_reg();
        assert( !n->bottom_type()->isa_oop_ptr() || ireg == Op_RegP,
                "oops must be in Op_RegP's" );

        // Check for vector live range (only if vector register is used).
        // On SPARC vector uses RegD which could be misaligned so it is not
        // processed as vector in RA.
        if (RegMask::is_vector(ireg))
          lrg._is_vector = 1;
        assert(n_type->isa_vect() == NULL || lrg._is_vector || ireg == Op_RegD,
               "vector must be in vector registers");

        // Check for bound register masks
        const RegMask &lrgmask = lrg.mask();
        if (lrgmask.is_bound(ireg))
          lrg._is_bound = 1;

        // Check for maximum frequency value
        if (lrg._maxfreq < b->_freq)
          lrg._maxfreq = b->_freq;

        // Check for oop-iness, or long/double
        // Check for multi-kill projection
        switch( ireg ) {
        case MachProjNode::fat_proj:
          // Fat projections have size equal to number of registers killed
          lrg.set_num_regs(rm.Size());
          lrg.set_reg_pressure(lrg.num_regs());
          lrg._fat_proj = 1;
          lrg._is_bound = 1;
          break;
        case Op_RegP:
#ifdef _LP64
          lrg.set_num_regs(2);  // Size is 2 stack words
#else
          lrg.set_num_regs(1);  // Size is 1 stack word
#endif
          // Register pressure is tracked relative to the maximum values
          // suggested for that platform, INTPRESSURE and FLOATPRESSURE,
          // and relative to other types which compete for the same regs.
          //
          // The following table contains suggested values based on the
          // architectures as defined in each .ad file.
          // INTPRESSURE and FLOATPRESSURE may be tuned differently for
          // compile-speed or performance.
          // Note1:
          // SPARC and SPARCV9 reg_pressures are at 2 instead of 1
          // since .ad registers are defined as high and low halves.
          // These reg_pressure values remain compatible with the code
          // in is_high_pressure() which relates get_invalid_mask_size(),
          // Block::_reg_pressure and INTPRESSURE, FLOATPRESSURE.
          // Note2:
          // SPARC -d32 has 24 registers available for integral values,
          // but only 10 of these are safe for 64-bit longs.
          // Using set_reg_pressure(2) for both int and long means
          // the allocator will believe it can fit 26 longs into
          // registers.  Using 2 for longs and 1 for ints means the
          // allocator will attempt to put 52 integers into registers.
          // The settings below limit this problem to methods with
          // many long values which are being run on 32-bit SPARC.
          //
          // ------------------- reg_pressure --------------------
          // Each entry is reg_pressure_per_value,number_of_regs
          //         RegL  RegI  RegFlags  RegF  RegD  INTPRESSURE  FLOATPRESSURE
          // IA32     2     1     1         1     1        6            6
          // IA64     1     1     1         1     1       50           41
          // SPARC    2     2     2         2     2       48 (24)      52 (26)
          // SPARCV9  2     2     2         2     2       48 (24)      52 (26)
          // AMD64    1     1     1         1     1       14           15
          // ------------------------------------------------------
#if defined(SPARC)
          lrg.set_reg_pressure(2);  // use for v9 as well
#else
          lrg.set_reg_pressure(1);  // normally one value per register
#endif
          if( n_type->isa_oop_ptr() ) {
            lrg._is_oop = 1;
          }
          break;
        case Op_RegL:           // Check for long or double
        case Op_RegD:
          lrg.set_num_regs(2);
          // Define platform specific register pressure
#if defined(SPARC) || defined(ARM)
          lrg.set_reg_pressure(2);
#elif defined(IA32)
          if( ireg == Op_RegL ) {
            lrg.set_reg_pressure(2);
          } else {
            lrg.set_reg_pressure(1);
          }
#else
          lrg.set_reg_pressure(1);  // normally one value per register
#endif
          // If this def of a double forces a mis-aligned double,
          // flag as '_fat_proj' - really flag as allowing misalignment
          // AND changes how we count interferences.  A mis-aligned
          // double can interfere with TWO aligned pairs, or effectively
          // FOUR registers!
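          // For example, a double landing in the straddling pair (R1,R2)
          // overlaps both aligned pairs (R0,R1) and (R2,R3), so it is
          // treated like a fat projection when interferences are counted.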
          if (rm.is_misaligned_pair()) {
            lrg._fat_proj = 1;
            lrg._is_bound = 1;
          }
          break;
        case Op_RegF:
        case Op_RegI:
        case Op_RegN:
        case Op_RegFlags:
        case 0:                 // not an ideal register
          lrg.set_num_regs(1);
#ifdef SPARC
          lrg.set_reg_pressure(2);
#else
          lrg.set_reg_pressure(1);
#endif
          break;
        case Op_VecS:
          assert(Matcher::vector_size_supported(T_BYTE,4), "sanity");
          assert(RegMask::num_registers(Op_VecS) == RegMask::SlotsPerVecS, "sanity");
          lrg.set_num_regs(RegMask::SlotsPerVecS);
          lrg.set_reg_pressure(1);
          break;
        case Op_VecD:
          assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecD), "sanity");
          assert(RegMask::num_registers(Op_VecD) == RegMask::SlotsPerVecD, "sanity");
          assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecD), "vector should be aligned");
          lrg.set_num_regs(RegMask::SlotsPerVecD);
          lrg.set_reg_pressure(1);
          break;
        case Op_VecX:
          assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecX), "sanity");
          assert(RegMask::num_registers(Op_VecX) == RegMask::SlotsPerVecX, "sanity");
          assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecX), "vector should be aligned");
          lrg.set_num_regs(RegMask::SlotsPerVecX);
          lrg.set_reg_pressure(1);
          break;
        case Op_VecY:
          assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecY), "sanity");
          assert(RegMask::num_registers(Op_VecY) == RegMask::SlotsPerVecY, "sanity");
          assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecY), "vector should be aligned");
          lrg.set_num_regs(RegMask::SlotsPerVecY);
          lrg.set_reg_pressure(1);
          break;
        default:
          ShouldNotReachHere();
        }
      }

      // Now do the same for inputs
      uint cnt = n->req();
      // Setup for CISC SPILLING
      uint inp = (uint)AdlcVMDeps::Not_cisc_spillable;
      if( UseCISCSpill && after_aggressive ) {
        inp = n->cisc_operand();
        if( inp != (uint)AdlcVMDeps::Not_cisc_spillable )
          // Convert operand number to edge index number
          inp = n->as_Mach()->operand_index(inp);
      }
      // Prepare register mask for each input
      for( uint k = input_edge_start; k < cnt; k++ ) {
        uint vreg = _lrg_map.live_range_id(n->in(k));
        if (!vreg) {
          continue;
        }

        // If this instruction is CISC Spillable, add the flags
        // bit to its appropriate input
        if( UseCISCSpill && after_aggressive && inp == k ) {
#ifndef PRODUCT
          if( TraceCISCSpill ) {
            tty->print("  use_cisc_RegMask: ");
            n->dump();
          }
#endif
          n->as_Mach()->use_cisc_RegMask();
        }

        LRG &lrg = lrgs(vreg);
        // // Testing for floating point code shape
        // Node *test = n->in(k);
        // if( test->is_Mach() ) {
        //   MachNode *m = test->as_Mach();
        //   int op = m->ideal_Opcode();
        //   if (n->is_Call() && (op == Op_AddF || op == Op_MulF) ) {
        //     int zzz = 1;
        //   }
        // }

        // Limit result register mask to acceptable registers.
        // Do not limit registers from uncommon uses before
        // AggressiveCoalesce.  This effectively pre-virtual-splits
        // around uncommon uses of common defs.
        const RegMask &rm = n->in_RegMask(k);
        if( !after_aggressive &&
            _cfg._bbs[n->in(k)->_idx]->_freq > 1000*b->_freq ) {
          // Since we are BEFORE aggressive coalesce, leave the register
          // mask untrimmed by the call.  This encourages more coalescing.
          // Later, AFTER aggressive, this live range will have to spill
          // but the spiller handles slow-path calls very nicely.
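          // For example, when a hot def is used by a call on a slow path
          // whose block is over 1000x colder than the def's block, the use's
          // register mask is not ANDed in here, so the cold call does not
          // constrain where the hot value may live.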
        } else {
          lrg.AND( rm );
        }

        // Check for bound register masks
        const RegMask &lrgmask = lrg.mask();
        int kreg = n->in(k)->ideal_reg();
        bool is_vect = RegMask::is_vector(kreg);
        assert(n->in(k)->bottom_type()->isa_vect() == NULL ||
               is_vect || kreg == Op_RegD,
               "vector must be in vector registers");
        if (lrgmask.is_bound(kreg))
          lrg._is_bound = 1;

        // If this use of a double forces a mis-aligned double,
        // flag as '_fat_proj' - really flag as allowing misalignment
        // AND changes how we count interferences.  A mis-aligned
        // double can interfere with TWO aligned pairs, or effectively
        // FOUR registers!
#ifdef ASSERT
        if (is_vect) {
          assert(lrgmask.is_aligned_sets(lrg.num_regs()), "vector should be aligned");
          assert(!lrg._fat_proj, "sanity");
          assert(RegMask::num_registers(kreg) == lrg.num_regs(), "sanity");
        }
#endif
        if (!is_vect && lrg.num_regs() == 2 && !lrg._fat_proj && rm.is_misaligned_pair()) {
          lrg._fat_proj = 1;
          lrg._is_bound = 1;
        }
        // if the LRG is an unaligned pair, we will have to spill
        // so clear the LRG's register mask if it is not already spilled
        if (!is_vect && !n->is_SpillCopy() &&
            (lrg._def == NULL || lrg.is_multidef() || !lrg._def->is_SpillCopy()) &&
            lrgmask.is_misaligned_pair()) {
          lrg.Clear();
        }

        // Check for maximum frequency value
        if( lrg._maxfreq < b->_freq )
          lrg._maxfreq = b->_freq;

      } // End for all allocated inputs
    } // end for all instructions
  } // end for all blocks

  // Final per-liverange setup
  for (uint i2 = 0; i2 < _lrg_map.max_lrg_id(); i2++) {
    LRG &lrg = lrgs(i2);
    assert(!lrg._is_vector || !lrg._fat_proj, "sanity");
    if (lrg.num_regs() > 1 && !lrg._fat_proj) {
      lrg.clear_to_sets();
    }
    lrg.compute_set_mask_size();
    if (lrg.not_free()) {       // Handle case where we lose from the start
      lrg.set_reg(OptoReg::Name(LRG::SPILL_REG));
      lrg._direct_conflict = 1;
    }
    lrg.set_degree(0);          // no neighbors in IFG yet
  }
}

//------------------------------set_was_low------------------------------------
// Set the was-lo-degree bit.  Conservative coalescing should not change the
// colorability of the graph.  If any live range was of low-degree before
// coalescing, it should Simplify.  This call sets the was-lo-degree bit.
// The bit is checked in Simplify.
void PhaseChaitin::set_was_low() {
#ifdef ASSERT
  for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
    int size = lrgs(i).num_regs();
    uint old_was_lo = lrgs(i)._was_lo;
    lrgs(i)._was_lo = 0;
    if( lrgs(i).lo_degree() ) {
      lrgs(i)._was_lo = 1;      // Trivially of low degree
    } else {                    // Else check the Briggs assertion
      // Briggs' observation is that the lo-degree neighbors of a
      // hi-degree live range will not interfere with the color choices
      // of said hi-degree live range.  The Simplify reverse-stack-coloring
      // order takes care of the details.  Hence you do not have to count
      // low-degree neighbors when determining if this guy colors.
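      // For example, a live range needing one register with ten neighbors,
      // eight of which are themselves lo-degree, only accumulates
      // briggs_degree over the remaining two hi-degree neighbors; if that
      // sum stays below degrees_of_freedom() the range still colors.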
      int briggs_degree = 0;
      IndexSet *s = _ifg->neighbors(i);
      IndexSetIterator elements(s);
      uint lidx;
      while((lidx = elements.next()) != 0) {
        if( !lrgs(lidx).lo_degree() )
          briggs_degree += MAX2(size,lrgs(lidx).num_regs());
      }
      if( briggs_degree < lrgs(i).degrees_of_freedom() )
        lrgs(i)._was_lo = 1;    // Low degree via the briggs assertion
    }
    assert(old_was_lo <= lrgs(i)._was_lo, "_was_lo may not decrease");
  }
#endif
}

#define REGISTER_CONSTRAINED 16

//------------------------------cache_lrg_info---------------------------------
// Compute cost/area ratio, in case we spill.  Build the lo-degree list.
void PhaseChaitin::cache_lrg_info( ) {

  for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
    LRG &lrg = lrgs(i);

    // Check for being of low degree: means we can be trivially colored.
    // Low degree, dead or must-spill guys just get to simplify right away
    if( lrg.lo_degree() ||
        !lrg.alive() ||
        lrg._must_spill ) {
      // Split low degree list into those guys that must get a
      // register and those that can go to register or stack.
      // The idea is LRGs that can go register or stack color first when
      // they have a good chance of getting a register.  The register-only
      // lo-degree live ranges always get a register.
      OptoReg::Name hi_reg = lrg.mask().find_last_elem();
      if( OptoReg::is_stack(hi_reg)) { // Can go to stack?
        lrg._next = _lo_stk_degree;
        _lo_stk_degree = i;
      } else {
        lrg._next = _lo_degree;
        _lo_degree = i;
      }
    } else {                    // Else high degree
      lrgs(_hi_degree)._prev = i;
      lrg._next = _hi_degree;
      lrg._prev = 0;
      _hi_degree = i;
    }
  }
}

//------------------------------Pre-Simplify-----------------------------------
// Simplify the IFG by removing LRGs of low degree that have NO copies
void PhaseChaitin::Pre_Simplify( ) {

  // Warm up the lo-degree no-copy list
  int lo_no_copy = 0;
  for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
    if ((lrgs(i).lo_degree() && !lrgs(i)._has_copy) ||
        !lrgs(i).alive() ||
        lrgs(i)._must_spill) {
      lrgs(i)._next = lo_no_copy;
      lo_no_copy = i;
    }
  }

  while( lo_no_copy ) {
    uint lo = lo_no_copy;
    lo_no_copy = lrgs(lo)._next;
    int size = lrgs(lo).num_regs();

    // Put the simplified guy on the simplified list.
    lrgs(lo)._next = _simplified;
    _simplified = lo;

    // Yank this guy from the IFG.
    IndexSet *adj = _ifg->remove_node( lo );

    // If any neighbors' degrees fall below their number of
    // allowed registers, then put that neighbor on the low degree
    // list.  Note that 'degree' can only fall and 'numregs' is
    // unchanged by this action.  Thus the two are equal at most once,
    // so LRGs hit the lo-degree worklists at most once.
    IndexSetIterator elements(adj);
    uint neighbor;
    while ((neighbor = elements.next()) != 0) {
      LRG *n = &lrgs(neighbor);
      assert( _ifg->effective_degree(neighbor) == n->degree(), "" );

      // Check for just becoming of-low-degree
      if( n->just_lo_degree() && !n->_has_copy ) {
        assert(!(*_ifg->_yanked)[neighbor],"Cannot move to lo degree twice");
        // Put on lo-degree list
        n->_next = lo_no_copy;
        lo_no_copy = neighbor;
      }
    }
  } // End of while lo-degree no_copy worklist not empty

  // No more lo-degree no-copy live ranges to simplify
}

//------------------------------Simplify---------------------------------------
// Simplify the IFG by removing LRGs of low degree.
void PhaseChaitin::Simplify( ) {

  while( 1 ) {                  // Repeat till simplified it all
    // May want to explore simplifying lo_degree before _lo_stk_degree.
    // This might result in more spills coloring into registers during
    // Select().
    while( _lo_degree || _lo_stk_degree ) {
      // If possible, pull from lo_stk first
      uint lo;
      if( _lo_degree ) {
        lo = _lo_degree;
        _lo_degree = lrgs(lo)._next;
      } else {
        lo = _lo_stk_degree;
        _lo_stk_degree = lrgs(lo)._next;
      }

      // Put the simplified guy on the simplified list.
      lrgs(lo)._next = _simplified;
      _simplified = lo;
      // If this guy is "at risk" then mark his current neighbors
      if( lrgs(lo)._at_risk ) {
        IndexSetIterator elements(_ifg->neighbors(lo));
        uint datum;
        while ((datum = elements.next()) != 0) {
          lrgs(datum)._risk_bias = lo;
        }
      }

      // Yank this guy from the IFG.
      IndexSet *adj = _ifg->remove_node( lo );

      // If any neighbors' degrees fall below their number of
      // allowed registers, then put that neighbor on the low degree
      // list.  Note that 'degree' can only fall and 'numregs' is
      // unchanged by this action.  Thus the two are equal at most once,
      // so LRGs hit the lo-degree worklist at most once.
      IndexSetIterator elements(adj);
      uint neighbor;
      while ((neighbor = elements.next()) != 0) {
        LRG *n = &lrgs(neighbor);
#ifdef ASSERT
        if( VerifyOpto || VerifyRegisterAllocator ) {
          assert( _ifg->effective_degree(neighbor) == n->degree(), "" );
        }
#endif

        // Check for just becoming of-low-degree just counting registers.
        // _must_spill live ranges are already on the low degree list.
        if( n->just_lo_degree() && !n->_must_spill ) {
          assert(!(*_ifg->_yanked)[neighbor],"Cannot move to lo degree twice");
          // Pull from hi-degree list
          uint prev = n->_prev;
          uint next = n->_next;
          if( prev ) lrgs(prev)._next = next;
          else _hi_degree = next;
          lrgs(next)._prev = prev;
          n->_next = _lo_degree;
          _lo_degree = neighbor;
        }
      }
    } // End of while lo-degree/lo_stk_degree worklist not empty

    // Check for got everything: is hi-degree list empty?
    if( !_hi_degree ) break;

    // Time to pick a potential spill guy
    uint lo_score = _hi_degree;
    double score = lrgs(lo_score).score();
    double area = lrgs(lo_score)._area;
    double cost = lrgs(lo_score)._cost;
    bool bound = lrgs(lo_score)._is_bound;

    // Find cheapest guy
    debug_only( int lo_no_simplify=0; );
    for( uint i = _hi_degree; i; i = lrgs(i)._next ) {
      assert( !(*_ifg->_yanked)[i], "" );
      // It's just vaguely possible to move hi-degree to lo-degree without
      // going through a just-lo-degree stage: If you remove a double from
      // a float live range its degree will drop by 2 and you can skip the
      // just-lo-degree stage.  It's very rare (shows up after 5000+ methods
      // in -Xcomp of Java2Demo).  So just choose this guy to simplify next.
      if( lrgs(i).lo_degree() ) {
        lo_score = i;
        break;
      }
      debug_only( if( lrgs(i)._was_lo ) lo_no_simplify=i; );
      double iscore = lrgs(i).score();
      double iarea = lrgs(i)._area;
      double icost = lrgs(i)._cost;
      bool ibound = lrgs(i)._is_bound;

      // Compare cost/area of i vs cost/area of lo_score.  Smaller cost/area
      // wins.  Ties happen because all live ranges in question have spilled
      // a few times before and the spill-score adds a huge number which
      // washes out the low order bits.  We are choosing the lesser of 2
      // evils; in this case pick largest area to spill.
      // Ties also happen when live ranges are defined and used only inside
      // one block; in that case their area is 0 and the score is set to max.
      // In such a case choose a bound live range over an unbound one to free
      // up registers, or the one with smaller cost to spill.
      if( iscore < score ||
          (iscore == score && iarea > area && lrgs(lo_score)._was_spilled2) ||
          (iscore == score && iarea == area &&
           ( (ibound && !bound) || ibound == bound && (icost < cost) )) ) {
        lo_score = i;
        score = iscore;
        area = iarea;
        cost = icost;
        bound = ibound;
      }
    }
    LRG *lo_lrg = &lrgs(lo_score);
    // The live range we choose for spilling is either hi-degree, or very
    // rarely it can be low-degree.  If we choose a hi-degree live range
    // there better not be any lo-degree choices.
    assert( lo_lrg->lo_degree() || !lo_no_simplify, "Live range was lo-degree before coalesce; should simplify" );

    // Pull from hi-degree list
    uint prev = lo_lrg->_prev;
    uint next = lo_lrg->_next;
    if( prev ) lrgs(prev)._next = next;
    else _hi_degree = next;
    lrgs(next)._prev = prev;
    // Jam him on the lo-degree list, despite his high degree.
    // Maybe he'll get a color, and maybe he'll spill.
    // Only Select() will know.
    lrgs(lo_score)._at_risk = true;
    _lo_degree = lo_score;
    lo_lrg->_next = 0;

  } // End of while not simplified everything

}

//------------------------------is_legal_reg-----------------------------------
// Is 'reg' register legal for 'lrg'?
static bool is_legal_reg(LRG &lrg, OptoReg::Name reg, int chunk) {
  if (reg >= chunk && reg < (chunk + RegMask::CHUNK_SIZE) &&
      lrg.mask().Member(OptoReg::add(reg,-chunk))) {
    // RA uses OptoReg which represents the highest element of a register set.
    // For example, vectorX (128bit) on x86 uses the [XMM,XMMb,XMMc,XMMd] set
    // in which XMMd is used by RA to represent such vectors.  A double value
    // uses [XMM,XMMb] pairs and XMMb is used by RA for it.
    // The register mask uses the largest bits set of overlapping register sets.
    // On x86 with AVX it uses 8 bits for each XMM registers set.
    //
    // The 'lrg' already has a cleared-to-set register mask (done in Select()
    // before calling choose_color()).  Passing the mask.Member(reg) check above
    // indicates that the size (num_regs) of the 'reg' set is less than or equal
    // to the 'lrg' set size.
    // For set size 1 any register which is a member of the 'lrg' mask is legal.
    if (lrg.num_regs()==1)
      return true;
    // For larger sets only an aligned register with the same set size is legal.
    int mask = lrg.num_regs()-1;
    if ((reg&mask) == mask)
      return true;
  }
  return false;
}

//------------------------------bias_color-------------------------------------
// Choose a color using the biasing heuristic
OptoReg::Name PhaseChaitin::bias_color( LRG &lrg, int chunk ) {

  // Check for "at_risk" LRG's
  uint risk_lrg = _lrg_map.find(lrg._risk_bias);
  if( risk_lrg != 0 ) {
    // Walk the colored neighbors of the "at_risk" candidate
    // Choose a color which is both legal and already taken by a neighbor
    // of the "at_risk" candidate in order to improve the chances of the
    // "at_risk" candidate of coloring
    IndexSetIterator elements(_ifg->neighbors(risk_lrg));
    uint datum;
    while ((datum = elements.next()) != 0) {
      OptoReg::Name reg = lrgs(datum).reg();
      // If this LRG's register is legal for us, choose it
      if (is_legal_reg(lrg, reg, chunk))
        return reg;
    }
  }

  uint copy_lrg = _lrg_map.find(lrg._copy_bias);
  if( copy_lrg != 0 ) {
    // If he has a color,
    if( !(*(_ifg->_yanked))[copy_lrg] ) {
      OptoReg::Name reg = lrgs(copy_lrg).reg();
      // And it is legal for you,
      if (is_legal_reg(lrg, reg, chunk))
        return reg;
    } else if( chunk == 0 ) {
      // Choose a color which is legal for him
      RegMask tempmask = lrg.mask();
      tempmask.AND(lrgs(copy_lrg).mask());
      tempmask.clear_to_sets(lrg.num_regs());
      OptoReg::Name reg = tempmask.find_first_set(lrg.num_regs());
      if (OptoReg::is_valid(reg))
        return reg;
    }
  }

  // If no bias info exists, just go with the register selection ordering
  if (lrg._is_vector || lrg.num_regs() == 2) {
    // Find an aligned set
    return OptoReg::add(lrg.mask().find_first_set(lrg.num_regs()),chunk);
  }

  // CNC - Fun hack.  Alternate 1st and 2nd selection.  Enables post-allocate
  // copy removal to remove many more copies, by preventing a just-assigned
  // register from being repeatedly assigned.
  OptoReg::Name reg = lrg.mask().find_first_elem();
  if( (++_alternate & 1) && OptoReg::is_valid(reg) ) {
    // This 'Remove; find; Insert' idiom is an expensive way to find the
    // SECOND element in the mask.
    lrg.Remove(reg);
    OptoReg::Name reg2 = lrg.mask().find_first_elem();
    lrg.Insert(reg);
    if( OptoReg::is_reg(reg2))
      reg = reg2;
  }
  return OptoReg::add( reg, chunk );
}

//------------------------------choose_color-----------------------------------
// Choose a color in the current chunk
OptoReg::Name PhaseChaitin::choose_color( LRG &lrg, int chunk ) {
  assert( C->in_preserve_stack_slots() == 0 || chunk != 0 || lrg._is_bound || lrg.mask().is_bound1() || !lrg.mask().Member(OptoReg::Name(_matcher._old_SP-1)), "must not allocate stack0 (inside preserve area)");
  assert(C->out_preserve_stack_slots() == 0 || chunk != 0 || lrg._is_bound || lrg.mask().is_bound1() || !lrg.mask().Member(OptoReg::Name(_matcher._old_SP+0)), "must not allocate stack0 (inside preserve area)");

  if( lrg.num_regs() == 1 ||    // Common Case
      !lrg._fat_proj )          // Aligned+adjacent pairs ok
    // Use a heuristic to "bias" the color choice
    return bias_color(lrg, chunk);

  assert(!lrg._is_vector, "should be not vector here" );
  assert( lrg.num_regs() >= 2, "dead live ranges do not color" );

  // Fat-proj case or misaligned double argument.
  assert(lrg.compute_mask_size() == lrg.num_regs() ||
         lrg.num_regs() == 2,"fat projs exactly color" );
  assert( !chunk, "always color in 1st chunk" );
  // Return the highest element in the set.
  return lrg.mask().find_last_elem();
}

//------------------------------Select-----------------------------------------
// Select colors by re-inserting LRGs back into the IFG.  LRGs are re-inserted
// in reverse order of removal.  As long as nothing of hi-degree was yanked,
// everything going back is guaranteed a color.  Select that color.  If some
// hi-degree LRG cannot get a color then we record that we must spill.
uint PhaseChaitin::Select( ) {
  uint spill_reg = LRG::SPILL_REG;
  _max_reg = OptoReg::Name(0);  // Past max register used
  while( _simplified ) {
    // Pull next LRG from the simplified list - in reverse order of removal
    uint lidx = _simplified;
    LRG *lrg = &lrgs(lidx);
    _simplified = lrg->_next;


#ifndef PRODUCT
    if (trace_spilling()) {
      ttyLocker ttyl;
      tty->print_cr("L%d selecting degree %d degrees_of_freedom %d", lidx, lrg->degree(),
                    lrg->degrees_of_freedom());
      lrg->dump();
    }
#endif

    // Re-insert into the IFG
    _ifg->re_insert(lidx);
    if( !lrg->alive() ) continue;
    // capture allstackedness flag before mask is hacked
    const int is_allstack = lrg->mask().is_AllStack();

    // Yeah, yeah, yeah, I know, I know.  I can refactor this
    // to avoid the GOTO, although the refactored code will not
    // be much clearer.  We arrive here IFF we have a stack-based
    // live range that cannot color in the current chunk, and it
    // has to move into the next free stack chunk.
    int chunk = 0;              // Current chunk is first chunk
    retry_next_chunk:

    // Remove neighbor colors
    IndexSet *s = _ifg->neighbors(lidx);

    debug_only(RegMask orig_mask = lrg->mask();)
    IndexSetIterator elements(s);
    uint neighbor;
    while ((neighbor = elements.next()) != 0) {
      // Note that neighbor might be a spill_reg.  In this case, exclusion
      // of its color will be a no-op, since the spill_reg chunk is in outer
      // space.  Also, if neighbor is in a different chunk, this exclusion
      // will be a no-op.
      // (Later on, if lrg runs out of possible colors in
      // its chunk, a new chunk of color may be tried, in which case
      // examination of neighbors is started again, at retry_next_chunk.)
      LRG &nlrg = lrgs(neighbor);
      OptoReg::Name nreg = nlrg.reg();
      // Only subtract masks in the same chunk
      if( nreg >= chunk && nreg < chunk + RegMask::CHUNK_SIZE ) {
#ifndef PRODUCT
        uint size = lrg->mask().Size();
        RegMask rm = lrg->mask();
#endif
        lrg->SUBTRACT(nlrg.mask());
#ifndef PRODUCT
        if (trace_spilling() && lrg->mask().Size() != size) {
          ttyLocker ttyl;
          tty->print("L%d ", lidx);
          rm.dump();
          tty->print(" intersected L%d ", neighbor);
          nlrg.mask().dump();
          tty->print(" removed ");
          rm.SUBTRACT(lrg->mask());
          rm.dump();
          tty->print(" leaving ");
          lrg->mask().dump();
          tty->cr();
        }
#endif
      }
    }
    //assert(is_allstack == lrg->mask().is_AllStack(), "nbrs must not change AllStackedness");
    // Aligned pairs need aligned masks
    assert(!lrg->_is_vector || !lrg->_fat_proj, "sanity");
    if (lrg->num_regs() > 1 && !lrg->_fat_proj) {
      lrg->clear_to_sets();
    }

    // Check if a color is available and if so pick the color
    OptoReg::Name reg = choose_color( *lrg, chunk );
#ifdef SPARC
    debug_only(lrg->compute_set_mask_size());
    assert(lrg->num_regs() < 2 || lrg->is_bound() || is_even(reg-1), "allocate all doubles aligned");
#endif

    //---------------
    // If we fail to color and the AllStack flag is set, trigger
    // a chunk-rollover event
    if(!OptoReg::is_valid(OptoReg::add(reg,-chunk)) && is_allstack) {
      // Bump register mask up to next stack chunk
      chunk += RegMask::CHUNK_SIZE;
      lrg->Set_All();

      goto retry_next_chunk;
    }

    //---------------
    // Did we get a color?
    else if( OptoReg::is_valid(reg)) {
#ifndef PRODUCT
      RegMask avail_rm = lrg->mask();
#endif

      // Record selected register
      lrg->set_reg(reg);

      if( reg >= _max_reg )     // Compute max register limit
        _max_reg = OptoReg::add(reg,1);
      // Fold reg back into normal space
      reg = OptoReg::add(reg,-chunk);

      // If the live range is not bound, then we actually had some choices
      // to make.  In this case, the mask has more bits in it than the colors
      // chosen.  Restrict the mask to just what was picked.
      int n_regs = lrg->num_regs();
      assert(!lrg->_is_vector || !lrg->_fat_proj, "sanity");
      if (n_regs == 1 || !lrg->_fat_proj) {
        assert(!lrg->_is_vector || n_regs <= RegMask::SlotsPerVecY, "sanity");
        lrg->Clear();           // Clear the mask
        lrg->Insert(reg);       // Set regmask to match selected reg
        // For vectors and pairs, also insert the low bit of the pair
        for (int i = 1; i < n_regs; i++)
          lrg->Insert(OptoReg::add(reg,-i));
        lrg->set_mask_size(n_regs);
      } else {                  // Else fatproj
        // mask must be equal to fatproj bits, by definition
      }
#ifndef PRODUCT
      if (trace_spilling()) {
        ttyLocker ttyl;
        tty->print("L%d selected ", lidx);
        lrg->mask().dump();
        tty->print(" from ");
        avail_rm.dump();
        tty->cr();
      }
#endif
      // Note that reg is the highest-numbered register in the newly-bound mask.
    } // end color available case

    //---------------
    // Live range is live and no colors available
    else {
      assert( lrg->alive(), "" );
      assert( !lrg->_fat_proj || lrg->is_multidef() ||
              lrg->_def->outcnt() > 0, "fat_proj cannot spill");
      assert( !orig_mask.is_AllStack(), "All Stack does not spill" );

      // Assign the special spillreg register
      lrg->set_reg(OptoReg::Name(spill_reg++));
      // Do not empty the regmask; leave mask_size lying around
      // for use during Spilling
#ifndef PRODUCT
      if( trace_spilling() ) {
        ttyLocker ttyl;
        tty->print("L%d spilling with neighbors: ", lidx);
        s->dump();
        debug_only(tty->print(" original mask: "));
        debug_only(orig_mask.dump());
        dump_lrg(lidx);
      }
#endif
    } // end spill case

  }

  return spill_reg-LRG::SPILL_REG; // Return number of spills
}


//------------------------------copy_was_spilled-------------------------------
// Copy 'was_spilled'-edness from the source Node to the dst Node.
void PhaseChaitin::copy_was_spilled( Node *src, Node *dst ) {
  if( _spilled_once.test(src->_idx) ) {
    _spilled_once.set(dst->_idx);
    lrgs(_lrg_map.find(dst))._was_spilled1 = 1;
    if( _spilled_twice.test(src->_idx) ) {
      _spilled_twice.set(dst->_idx);
      lrgs(_lrg_map.find(dst))._was_spilled2 = 1;
    }
  }
}

//------------------------------set_was_spilled--------------------------------
// Set the 'spilled_once' or 'spilled_twice' flag on a node.
void PhaseChaitin::set_was_spilled( Node *n ) {
  if( _spilled_once.test_set(n->_idx) )
    _spilled_twice.set(n->_idx);
}

//------------------------------fixup_spills-----------------------------------
// Convert Ideal spill instructions into proper FramePtr + offset Loads and
// Stores. Use-def chains are NOT preserved, but Node->LRG->reg maps are.
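// (Illustration, assuming an x86-like CISC target: an instruction whose
// cisc_operand() input ended up in a stack slot can fold the slot into the
// instruction itself. Hypothetically, if the second input of an integer add
// was spilled to [FP + #16], a register-register "addl rax, rbx" is replaced
// by its cisc_version(), roughly "addl rax, [rbp + #16]", so no separate
// reload needs to be emitted.)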
void PhaseChaitin::fixup_spills() {
  // This function does only cisc spill work.
  if( !UseCISCSpill ) return;

  NOT_PRODUCT( Compile::TracePhase t3("fixupSpills", &_t_fixupSpills, TimeCompiler); )

  // Grab the Frame Pointer
  Node *fp = _cfg._broot->head()->in(1)->in(TypeFunc::FramePtr);

  // For all blocks
  for( uint i = 0; i < _cfg._num_blocks; i++ ) {
    Block *b = _cfg._blocks[i];

    // For all instructions in block
    uint last_inst = b->end_idx();
    for( uint j = 1; j <= last_inst; j++ ) {
      Node *n = b->_nodes[j];

      // Dead instruction???
      assert( n->outcnt() != 0 ||  // Nothing dead after post alloc
              C->top() == n ||     // Or the random TOP node
              n->is_Proj(),        // Or a fat-proj kill node
              "No dead instructions after post-alloc" );

      int inp = n->cisc_operand();
      if( inp != AdlcVMDeps::Not_cisc_spillable ) {
        // Convert operand number to edge index number
        MachNode *mach = n->as_Mach();
        inp = mach->operand_index(inp);
        Node *src = n->in(inp);  // Value to load or store
        LRG &lrg_cisc = lrgs(_lrg_map.find_const(src));
        OptoReg::Name src_reg = lrg_cisc.reg();
        // Doubles record the HIGH register of an adjacent pair.
        src_reg = OptoReg::add(src_reg,1-lrg_cisc.num_regs());
        if( OptoReg::is_stack(src_reg) ) { // If input is on stack
          // This is a CISC Spill, get stack offset and construct new node
#ifndef PRODUCT
          if( TraceCISCSpill ) {
            tty->print(" reg-instr: ");
            n->dump();
          }
#endif
          int stk_offset = reg2offset(src_reg);
          // Bailout if we might exceed node limit when spilling this instruction
          C->check_node_count(0, "out of nodes fixing spills");
          if (C->failing()) return;
          // Transform node
          MachNode *cisc = mach->cisc_version(stk_offset, C)->as_Mach();
          cisc->set_req(inp,fp);   // Base register is frame pointer
          if( cisc->oper_input_base() > 1 && mach->oper_input_base() <= 1 ) {
            assert( cisc->oper_input_base() == 2, "Only adding one edge");
            cisc->ins_req(1,src);  // Requires a memory edge
          }
          b->_nodes.map(j,cisc);   // Insert into basic block
          n->subsume_by(cisc, C);  // Correct graph
          //
          ++_used_cisc_instructions;
#ifndef PRODUCT
          if( TraceCISCSpill ) {
            tty->print(" cisc-instr: ");
            cisc->dump();
          }
#endif
        } else {
#ifndef PRODUCT
          if( TraceCISCSpill ) {
            tty->print(" using reg-instr: ");
            n->dump();
          }
#endif
          ++_unused_cisc_instructions; // input can be on stack
        }
      }

    } // End of for all instructions

  } // End of for all blocks
}

//------------------------------find_base_for_derived--------------------------
// Helper to stretch above; recursively discover the base Node for a
// given derived Node. Easy for AddP-related machine nodes, but needs
// to be recursive for derived Phis.
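// (Illustration: a "derived" pointer is an interior pointer such as the
// address of an array element, e.g. a machine AddP computing base+16; for
// such nodes the base is simply in(AddPNode::Base). A Phi merging derived
// pointers has no single AddP to consult, so the recursion below finds each
// input's base and, if they differ, builds a matching base Phi in the same
// block. A constant NULL+offset derived pointer gets the shared mach_null()
// node as its base.)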
Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derived, uint &maxlrg ) {
  // See if already computed; if so return it
  if( derived_base_map[derived->_idx] )
    return derived_base_map[derived->_idx];

  // See if this happens to be a base.
  // NOTE: we use TypePtr instead of TypeOopPtr because we can have
  // pointers derived from NULL! These are always along paths that
  // can't happen at run-time but the optimizer cannot deduce it, so
  // we have to handle it gracefully.
  assert(!derived->bottom_type()->isa_narrowoop() ||
         derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity");
  const TypePtr *tj = derived->bottom_type()->isa_ptr();
  // If it's an OOP with a non-zero offset, then it is derived.
  if( tj == NULL || tj->_offset == 0 ) {
    derived_base_map[derived->_idx] = derived;
    return derived;
  }
  // Derived is NULL+offset? Base is NULL!
  if( derived->is_Con() ) {
    Node *base = _matcher.mach_null();
    assert(base != NULL, "sanity");
    if (base->in(0) == NULL) {
      // Initialize it once and make it shared:
      // set control to _root and place it into Start block
      // (where top() node is placed).
      base->init_req(0, _cfg._root);
      Block *startb = _cfg._bbs[C->top()->_idx];
      startb->_nodes.insert(startb->find_node(C->top()), base );
      _cfg._bbs.map( base->_idx, startb );
      assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet");
    }
    if (_lrg_map.live_range_id(base) == 0) {
      new_lrg(base, maxlrg++);
    }
    assert(base->in(0) == _cfg._root &&
           _cfg._bbs[base->_idx] == _cfg._bbs[C->top()->_idx], "base NULL should be shared");
    derived_base_map[derived->_idx] = base;
    return base;
  }

  // Check for AddP-related opcodes
  if (!derived->is_Phi()) {
    assert(derived->as_Mach()->ideal_Opcode() == Op_AddP, err_msg_res("but is: %s", derived->Name()));
    Node *base = derived->in(AddPNode::Base);
    derived_base_map[derived->_idx] = base;
    return base;
  }

  // Recursively find bases for Phis.
  // First check to see if we can avoid a base Phi here.
  Node *base = find_base_for_derived( derived_base_map, derived->in(1), maxlrg);
  uint i;
  for( i = 2; i < derived->req(); i++ )
    if( base != find_base_for_derived( derived_base_map, derived->in(i), maxlrg))
      break;
  // Went to the end without finding any different bases?
  if( i == derived->req() ) {   // No need for a base Phi here
    derived_base_map[derived->_idx] = base;
    return base;
  }

  // Now we see we need a base-Phi here to merge the bases
  const Type *t = base->bottom_type();
  base = new (C) PhiNode( derived->in(0), t );
  for( i = 1; i < derived->req(); i++ ) {
    base->init_req(i, find_base_for_derived(derived_base_map, derived->in(i), maxlrg));
    t = t->meet(base->in(i)->bottom_type());
  }
  base->as_Phi()->set_type(t);

  // Search the current block for an existing base-Phi
  Block *b = _cfg._bbs[derived->_idx];
  for( i = 1; i <= b->end_idx(); i++ ) { // Search for matching Phi
    Node *phi = b->_nodes[i];
    if( !phi->is_Phi() ) {      // Found end of Phis with no match?
      b->_nodes.insert( i, base ); // Must insert created Phi here as base
      _cfg._bbs.map( base->_idx, b );
      new_lrg(base,maxlrg++);
      break;
    }
    // See if Phi matches.
    uint j;
    for( j = 1; j < base->req(); j++ )
      if( phi->in(j) != base->in(j) &&
          !(phi->in(j)->is_Con() && base->in(j)->is_Con()) ) // allow different NULLs
        break;
    if( j == base->req() ) {    // All inputs match?
      base = phi;               // Then use existing 'phi' and drop 'base'
      break;
    }
  }


  // Cache info for later passes
  derived_base_map[derived->_idx] = base;
  return base;
}


//------------------------------stretch_base_pointer_live_ranges---------------
// At each Safepoint, insert extra debug edges for each pair of derived value/
// base pointer that is live across the Safepoint for oopmap building. The
// edge pairs get added in after sfpt->jvmtail()->oopoff(), but are in the
// required edge set.
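// (Illustration: if only the derived value, say base+16, were recorded as
// live across a safepoint, a moving collector could relocate the object and
// leave the interior pointer dangling. Recording the (derived, base) pair as
// extra debug edges lets the oopmap re-derive the pointer after a move as
// new_base + (derived - old_base).)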
bool PhaseChaitin::stretch_base_pointer_live_ranges(ResourceArea *a) {
  int must_recompute_live = false;
  uint maxlrg = _lrg_map.max_lrg_id();
  Node **derived_base_map = (Node**)a->Amalloc(sizeof(Node*)*C->unique());
  memset( derived_base_map, 0, sizeof(Node*)*C->unique() );

  // For all blocks in RPO do...
  for( uint i=0; i<_cfg._num_blocks; i++ ) {
    Block *b = _cfg._blocks[i];
    // Note use of deep-copy constructor. I cannot hammer the original
    // liveout bits, because they are needed by the following coalesce pass.
    IndexSet liveout(_live->live(b));

    for( uint j = b->end_idx() + 1; j > 1; j-- ) {
      Node *n = b->_nodes[j-1];

      // Pre-split compares of loop-phis. Loop-phis form a cycle we would
      // like to see in the same register. Compare uses the loop-phi and so
      // extends its live range BUT cannot be part of the cycle. If this
      // extended live range overlaps with the update of the loop-phi value
      // we need both alive at the same time -- which requires at least 1
      // copy. But because Intel has only two-address instructions we end up
      // with at least 2 copies, one before the loop-phi update instruction
      // and one after. Instead we split the input to the compare just after
      // the phi.
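      // (Illustration, for a hypothetical counted loop: a compare such as
      //      phi = Phi(init, incr); ...; incr = AddI(phi, #1); CmpI(phi, limit)
      //  is rewritten so the compare reads a copy taken right after the phi:
      //      phi = Phi(init, incr); tmp = MachSpillCopy(phi); ...; CmpI(tmp, limit)
      //  so the compare no longer stretches phi's live range across its own
      //  update and a single inserted copy suffices.)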
      if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CmpI ) {
        Node *phi = n->in(1);
        if( phi->is_Phi() && phi->as_Phi()->region()->is_Loop() ) {
          Block *phi_block = _cfg._bbs[phi->_idx];
          if( _cfg._bbs[phi_block->pred(2)->_idx] == b ) {
            const RegMask *mask = C->matcher()->idealreg2spillmask[Op_RegI];
            Node *spill = new (C) MachSpillCopyNode( phi, *mask, *mask );
            insert_proj( phi_block, 1, spill, maxlrg++ );
            n->set_req(1,spill);
            must_recompute_live = true;
          }
        }
      }

      // Get value being defined
      uint lidx = _lrg_map.live_range_id(n);
      // Ignore the occasional brand-new live range
      if (lidx && lidx < _lrg_map.max_lrg_id()) {
        // Remove from live-out set
        liveout.remove(lidx);

        // Copies do not define a new value and so do not interfere.
        // Remove the copy's source from the liveout set before interfering.
        uint idx = n->is_Copy();
        if (idx) {
          liveout.remove(_lrg_map.live_range_id(n->in(idx)));
        }
      }

      // Found a safepoint?
      JVMState *jvms = n->jvms();
      if( jvms ) {
        // Now scan for a live derived pointer
        IndexSetIterator elements(&liveout);
        uint neighbor;
        while ((neighbor = elements.next()) != 0) {
          // Find reaching DEF for base and derived values
          // This works because we are still in SSA during this call.
          Node *derived = lrgs(neighbor)._def;
          const TypePtr *tj = derived->bottom_type()->isa_ptr();
          assert(!derived->bottom_type()->isa_narrowoop() ||
                 derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity");
          // If it's an OOP with a non-zero offset, then it is derived.
          if( tj && tj->_offset != 0 && tj->isa_oop_ptr() ) {
            Node *base = find_base_for_derived(derived_base_map, derived, maxlrg);
            assert(base->_idx < _lrg_map.size(), "");
            // Add reaching DEFs of derived pointer and base pointer as a
            // pair of inputs
            n->add_req(derived);
            n->add_req(base);

            // See if the base pointer is already live to this point.
            // Since I'm working on the SSA form, live-ness amounts to
            // reaching def's. So if I find the base's live range then
            // I know the base's def reaches here.
            if ((_lrg_map.live_range_id(base) >= _lrg_map.max_lrg_id() || // (Brand new base (hence not live) or
                 !liveout.member(_lrg_map.live_range_id(base))) &&        // not live) AND
                 (_lrg_map.live_range_id(base) > 0) &&                    // not a constant
                 _cfg._bbs[base->_idx] != b) {                            // base not def'd in blk)
              // Base pointer is not currently live. Since I stretched
              // the base pointer to here and it crosses basic-block
              // boundaries, the global live info is now incorrect.
              // Recompute live.
              must_recompute_live = true;
            } // End of if base pointer is not live to debug info
          }
        } // End of scan all live data for derived ptrs crossing GC point
      } // End of if found a GC point

      // Make all inputs live
      if (!n->is_Phi()) {       // Phi function uses come from prior block
        for (uint k = 1; k < n->req(); k++) {
          uint lidx = _lrg_map.live_range_id(n->in(k));
          if (lidx < _lrg_map.max_lrg_id()) {
            liveout.insert(lidx);
          }
        }
      }

    } // End of forall instructions in block
    liveout.clear();  // Free the memory used by liveout.

  } // End of forall blocks
  _lrg_map.set_max_lrg_id(maxlrg);

  // If I created a new live range I need to recompute live
  if (maxlrg != _ifg->_maxlrg) {
    must_recompute_live = true;
  }

  return must_recompute_live != 0;
}


//------------------------------add_reference----------------------------------
// Extend the node to LRG mapping

void PhaseChaitin::add_reference(const Node *node, const Node *old_node) {
  _lrg_map.extend(node->_idx, _lrg_map.live_range_id(old_node));
}

//------------------------------dump-------------------------------------------
#ifndef PRODUCT
void PhaseChaitin::dump(const Node *n) const {
  uint r = (n->_idx < _lrg_map.size()) ? _lrg_map.find_const(n) : 0;
  tty->print("L%d",r);
  if (r && n->Opcode() != Op_Phi) {
    if( _node_regs ) {          // Got a post-allocation copy of allocation?
      tty->print("[");
      OptoReg::Name second = get_reg_second(n);
      if( OptoReg::is_valid(second) ) {
        if( OptoReg::is_reg(second) )
          tty->print("%s:",Matcher::regName[second]);
        else
          tty->print("%s+%d:",OptoReg::regname(OptoReg::c_frame_pointer), reg2offset_unchecked(second));
      }
      OptoReg::Name first = get_reg_first(n);
      if( OptoReg::is_reg(first) )
        tty->print("%s]",Matcher::regName[first]);
      else
        tty->print("%s+%d]",OptoReg::regname(OptoReg::c_frame_pointer), reg2offset_unchecked(first));
    } else
      n->out_RegMask().dump();
  }
  tty->print("/N%d\t",n->_idx);
  tty->print("%s === ", n->Name());
  uint k;
  for (k = 0; k < n->req(); k++) {
    Node *m = n->in(k);
    if (!m) {
      tty->print("_ ");
    }
    else {
      uint r = (m->_idx < _lrg_map.size()) ? _lrg_map.find_const(m) : 0;
      tty->print("L%d",r);
      // Data MultiNodes can have projections with no real registers.
      // Don't die while dumping them.
      int op = n->Opcode();
      if( r && op != Op_Phi && op != Op_Proj && op != Op_SCMemProj) {
        if( _node_regs ) {
          tty->print("[");
          OptoReg::Name second = get_reg_second(n->in(k));
          if( OptoReg::is_valid(second) ) {
            if( OptoReg::is_reg(second) )
              tty->print("%s:",Matcher::regName[second]);
            else
              tty->print("%s+%d:",OptoReg::regname(OptoReg::c_frame_pointer),
                         reg2offset_unchecked(second));
          }
          OptoReg::Name first = get_reg_first(n->in(k));
          if( OptoReg::is_reg(first) )
            tty->print("%s]",Matcher::regName[first]);
          else
            tty->print("%s+%d]",OptoReg::regname(OptoReg::c_frame_pointer),
                       reg2offset_unchecked(first));
        } else
          n->in_RegMask(k).dump();
      }
      tty->print("/N%d ",m->_idx);
    }
  }
  if( k < n->len() && n->in(k) ) tty->print("| ");
  for( ; k < n->len(); k++ ) {
    Node *m = n->in(k);
    if(!m) {
      break;
    }
    uint r = (m->_idx < _lrg_map.size()) ? _lrg_map.find_const(m) : 0;
    tty->print("L%d",r);
    tty->print("/N%d ",m->_idx);
  }
  if( n->is_Mach() ) n->as_Mach()->dump_spec(tty);
  else n->dump_spec(tty);
  if( _spilled_once.test(n->_idx ) ) {
    tty->print(" Spill_1");
    if( _spilled_twice.test(n->_idx ) )
      tty->print(" Spill_2");
  }
  tty->print("\n");
}

void PhaseChaitin::dump( const Block * b ) const {
  b->dump_head( &_cfg._bbs );

  // For all instructions
  for( uint j = 0; j < b->_nodes.size(); j++ )
    dump(b->_nodes[j]);
  // Print live-out info at end of block
  if( _live ) {
    tty->print("Liveout: ");
    IndexSet *live = _live->live(b);
    IndexSetIterator elements(live);
    tty->print("{");
    uint i;
    while ((i = elements.next()) != 0) {
      tty->print("L%d ", _lrg_map.find_const(i));
    }
    tty->print_cr("}");
  }
  tty->print("\n");
}

void PhaseChaitin::dump() const {
  tty->print( "--- Chaitin -- argsize: %d framesize: %d ---\n",
              _matcher._new_SP, _framesize );

  // For all blocks
  for( uint i = 0; i < _cfg._num_blocks; i++ )
    dump(_cfg._blocks[i]);
  // End of per-block dump
  tty->print("\n");

  if (!_ifg) {
    tty->print("(No IFG.)\n");
    return;
  }

  // Dump LRG array
  tty->print("--- Live RanGe Array ---\n");
  for (uint i2 = 1; i2 < _lrg_map.max_lrg_id(); i2++) {
    tty->print("L%d: ",i2);
    if (i2 < _ifg->_maxlrg) {
      lrgs(i2).dump();
    }
    else {
      tty->print_cr("new LRG");
    }
  }
  tty->print_cr("");

  // Dump lo-degree list
  tty->print("Lo degree: ");
  for(uint i3 = _lo_degree; i3; i3 = lrgs(i3)._next )
    tty->print("L%d ",i3);
  tty->print_cr("");

  // Dump lo-stk-degree list
  tty->print("Lo stk degree: ");
  for(uint i4 = _lo_stk_degree; i4; i4 = lrgs(i4)._next )
    tty->print("L%d ",i4);
  tty->print_cr("");

  // Dump hi-degree list
  tty->print("Hi degree: ");
  for(uint i5 = _hi_degree; i5; i5 = lrgs(i5)._next )
    tty->print("L%d ",i5);
  tty->print_cr("");
}

//------------------------------dump_degree_lists------------------------------
void PhaseChaitin::dump_degree_lists() const {
  // Dump lo-degree list
  tty->print("Lo degree: ");
  for( uint i = _lo_degree; i; i = lrgs(i)._next )
    tty->print("L%d ",i);
  tty->print_cr("");

  // Dump lo-stk-degree list
  tty->print("Lo stk degree: ");
  for(uint i2 = _lo_stk_degree; i2; i2 = lrgs(i2)._next )
    tty->print("L%d ",i2);
  tty->print_cr("");

  // Dump hi-degree list
  tty->print("Hi degree: ");
  for(uint i3 = _hi_degree; i3; i3 = lrgs(i3)._next )
    tty->print("L%d ",i3);
  tty->print_cr("");
}

//------------------------------dump_simplified--------------------------------
void PhaseChaitin::dump_simplified() const {
  tty->print("Simplified: ");
  for( uint i = _simplified; i; i = lrgs(i)._next )
    tty->print("L%d ",i);
  tty->print_cr("");
}

static char *print_reg( OptoReg::Name reg, const PhaseChaitin *pc, char *buf ) {
  if ((int)reg < 0)
    sprintf(buf, "<OptoReg::%d>", (int)reg);
  else if (OptoReg::is_reg(reg))
    strcpy(buf, Matcher::regName[reg]);
  else
    sprintf(buf,"%s + #%d",OptoReg::regname(OptoReg::c_frame_pointer),
            pc->reg2offset(reg));
  return buf+strlen(buf);
}

//------------------------------dump_register----------------------------------
// Dump a register name into a buffer. Be intelligent if we get called
// before allocation is complete.
char *PhaseChaitin::dump_register( const Node *n, char *buf ) const {
  if( !this ) {                 // Not got anything?
    sprintf(buf,"N%d",n->_idx); // Then use Node index
  } else if( _node_regs ) {
    // Post allocation, use direct mappings, no LRG info available
    print_reg( get_reg_first(n), this, buf );
  } else {
    uint lidx = _lrg_map.find_const(n); // Grab LRG number
    if( !_ifg ) {
      sprintf(buf,"L%d",lidx);  // No register binding yet
    } else if( !lidx ) {        // Special, not allocated value
      strcpy(buf,"Special");
    } else {
      if (lrgs(lidx)._is_vector) {
        if (lrgs(lidx).mask().is_bound_set(lrgs(lidx).num_regs()))
          print_reg( lrgs(lidx).reg(), this, buf ); // a bound machine register
        else
          sprintf(buf,"L%d",lidx); // No register binding yet
      } else if( (lrgs(lidx).num_regs() == 1)
                   ? lrgs(lidx).mask().is_bound1()
                   : lrgs(lidx).mask().is_bound_pair() ) {
        // Hah! We have a bound machine register
        print_reg( lrgs(lidx).reg(), this, buf );
      } else {
        sprintf(buf,"L%d",lidx); // No register binding yet
      }
    }
  }
  return buf+strlen(buf);
}

//----------------------dump_for_spill_split_recycle--------------------------
void PhaseChaitin::dump_for_spill_split_recycle() const {
  if( WizardMode && (PrintCompilation || PrintOpto) ) {
    // Display which live ranges need to be split and the allocator's state
    tty->print_cr("Graph-Coloring Iteration %d will split the following live ranges", _trip_cnt);
    for (uint bidx = 1; bidx < _lrg_map.max_lrg_id(); bidx++) {
      if( lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG ) {
        tty->print("L%d: ", bidx);
        lrgs(bidx).dump();
      }
    }
    tty->cr();
    dump();
  }
}

//------------------------------dump_frame------------------------------------
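// (Illustration: the regions printed below run from high stack addresses down
// to the new SP, in this order: incoming register and stack arguments plus any
// pad0 owned by the caller, the old outgoing preserve area, an "Old SP" marker
// with the frame size, the in-preserve area (return address, saved fp,
// optional VerifyStackAtCalls slot, fixed slots, pad2 alignment), the spill
// area, the outgoing argument area, and the new outgoing preserve area.)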
void PhaseChaitin::dump_frame() const {
  const char *fp = OptoReg::regname(OptoReg::c_frame_pointer);
  const TypeTuple *domain = C->tf()->domain();
  const int argcnt = domain->cnt() - TypeFunc::Parms;

  // Incoming arguments in registers dump
  for( int k = 0; k < argcnt; k++ ) {
    OptoReg::Name parmreg = _matcher._parm_regs[k].first();
    if( OptoReg::is_reg(parmreg))  {
      const char *reg_name = OptoReg::regname(parmreg);
      tty->print("#r%3.3d %s", parmreg, reg_name);
      parmreg = _matcher._parm_regs[k].second();
      if( OptoReg::is_reg(parmreg)) {
        tty->print(":%s", OptoReg::regname(parmreg));
      }
      tty->print(" : parm %d: ", k);
      domain->field_at(k + TypeFunc::Parms)->dump();
      tty->print_cr("");
    }
  }

  // Check for un-owned padding above incoming args
  OptoReg::Name reg = _matcher._new_SP;
  if( reg > _matcher._in_arg_limit ) {
    reg = OptoReg::add(reg, -1);
    tty->print_cr("#r%3.3d %s+%2d: pad0, owned by CALLER", reg, fp, reg2offset_unchecked(reg));
  }

  // Incoming argument area dump
  OptoReg::Name begin_in_arg = OptoReg::add(_matcher._old_SP,C->out_preserve_stack_slots());
  while( reg > begin_in_arg ) {
    reg = OptoReg::add(reg, -1);
    tty->print("#r%3.3d %s+%2d: ",reg,fp,reg2offset_unchecked(reg));
    int j;
    for( j = 0; j < argcnt; j++) {
      if( _matcher._parm_regs[j].first() == reg ||
          _matcher._parm_regs[j].second() == reg ) {
        tty->print("parm %d: ",j);
        domain->field_at(j + TypeFunc::Parms)->dump();
        tty->print_cr("");
        break;
      }
    }
    if( j >= argcnt )
      tty->print_cr("HOLE, owned by SELF");
  }

  // Old outgoing preserve area
  while( reg > _matcher._old_SP ) {
    reg = OptoReg::add(reg, -1);
    tty->print_cr("#r%3.3d %s+%2d: old out preserve",reg,fp,reg2offset_unchecked(reg));
  }

  // Old SP
  tty->print_cr("# -- Old %s -- Framesize: %d --",fp,
                reg2offset_unchecked(OptoReg::add(_matcher._old_SP,-1)) - reg2offset_unchecked(_matcher._new_SP)+jintSize);

  // Preserve area dump
  int fixed_slots = C->fixed_slots();
  OptoReg::Name begin_in_preserve = OptoReg::add(_matcher._old_SP, -(int)C->in_preserve_stack_slots());
  OptoReg::Name return_addr = _matcher.return_addr();

  reg = OptoReg::add(reg, -1);
  while (OptoReg::is_stack(reg)) {
    tty->print("#r%3.3d %s+%2d: ",reg,fp,reg2offset_unchecked(reg));
    if (return_addr == reg) {
      tty->print_cr("return address");
    } else if (reg >= begin_in_preserve) {
      // Preserved slots are present on x86
      if (return_addr == OptoReg::add(reg, VMRegImpl::slots_per_word))
        tty->print_cr("saved fp register");
      else if (return_addr == OptoReg::add(reg, 2*VMRegImpl::slots_per_word) &&
               VerifyStackAtCalls)
        tty->print_cr("0xBADB100D +VerifyStackAtCalls");
      else
        tty->print_cr("in_preserve");
    } else if ((int)OptoReg::reg2stack(reg) < fixed_slots) {
      tty->print_cr("Fixed slot %d", OptoReg::reg2stack(reg));
    } else {
      tty->print_cr("pad2, stack alignment");
    }
    reg = OptoReg::add(reg, -1);
  }

  // Spill area dump
  reg = OptoReg::add(_matcher._new_SP, _framesize );
  while( reg > _matcher._out_arg_limit ) {
    reg = OptoReg::add(reg, -1);
    tty->print_cr("#r%3.3d %s+%2d: spill",reg,fp,reg2offset_unchecked(reg));
  }

  // Outgoing argument area dump
  while( reg > OptoReg::add(_matcher._new_SP, C->out_preserve_stack_slots()) ) {
    reg = OptoReg::add(reg, -1);
    tty->print_cr("#r%3.3d %s+%2d: outgoing argument",reg,fp,reg2offset_unchecked(reg));
  }

  // Outgoing new preserve area
  while( reg > _matcher._new_SP ) {
    reg = OptoReg::add(reg, -1);
    tty->print_cr("#r%3.3d %s+%2d: new out preserve",reg,fp,reg2offset_unchecked(reg));
  }
  tty->print_cr("#");
}

//------------------------------dump_bb----------------------------------------
void PhaseChaitin::dump_bb( uint pre_order ) const {
  tty->print_cr("---dump of B%d---",pre_order);
  for( uint i = 0; i < _cfg._num_blocks; i++ ) {
    Block *b = _cfg._blocks[i];
    if( b->_pre_order == pre_order )
      dump(b);
  }
}

//------------------------------dump_lrg---------------------------------------
void PhaseChaitin::dump_lrg( uint lidx, bool defs_only ) const {
  tty->print_cr("---dump of L%d---",lidx);

  if (_ifg) {
    if (lidx >= _lrg_map.max_lrg_id()) {
      tty->print("Attempt to print live range index beyond max live range.\n");
      return;
    }
    tty->print("L%d: ",lidx);
    if (lidx < _ifg->_maxlrg) {
      lrgs(lidx).dump();
    } else {
      tty->print_cr("new LRG");
    }
  }
  if( _ifg && lidx < _ifg->_maxlrg) {
    tty->print("Neighbors: %d - ", _ifg->neighbor_cnt(lidx));
    _ifg->neighbors(lidx)->dump();
    tty->cr();
  }
  // For all blocks
  for( uint i = 0; i < _cfg._num_blocks; i++ ) {
    Block *b = _cfg._blocks[i];
    int dump_once = 0;

    // For all instructions
    for( uint j = 0; j < b->_nodes.size(); j++ ) {
      Node *n = b->_nodes[j];
      if (_lrg_map.find_const(n) == lidx) {
        if (!dump_once++) {
          tty->cr();
          b->dump_head( &_cfg._bbs );
        }
        dump(n);
        continue;
      }
      if (!defs_only) {
        uint cnt = n->req();
        for( uint k = 1; k < cnt; k++ ) {
          Node *m = n->in(k);
          if (!m) {
            continue;           // be robust in the dumper
          }
          if (_lrg_map.find_const(m) == lidx) {
            if (!dump_once++) {
              tty->cr();
              b->dump_head(&_cfg._bbs);
            }
            dump(n);
          }
        }
      }
    }
  } // End of per-block dump
  tty->cr();
}
#endif // not PRODUCT

//------------------------------print_chaitin_statistics-------------------------------
int PhaseChaitin::_final_loads = 0;
int PhaseChaitin::_final_stores = 0;
int PhaseChaitin::_final_memoves= 0;
int PhaseChaitin::_final_copies = 0;
double PhaseChaitin::_final_load_cost = 0;
double PhaseChaitin::_final_store_cost = 0;
double PhaseChaitin::_final_memove_cost= 0;
double PhaseChaitin::_final_copy_cost = 0;
int PhaseChaitin::_conserv_coalesce = 0;
int PhaseChaitin::_conserv_coalesce_pair = 0;
int PhaseChaitin::_conserv_coalesce_trie = 0;
int PhaseChaitin::_conserv_coalesce_quad = 0;
int PhaseChaitin::_post_alloc = 0;
int PhaseChaitin::_lost_opp_pp_coalesce = 0;
int PhaseChaitin::_lost_opp_cflow_coalesce = 0;
int PhaseChaitin::_used_cisc_instructions = 0;
int PhaseChaitin::_unused_cisc_instructions = 0;
int PhaseChaitin::_allocator_attempts = 0;
int PhaseChaitin::_allocator_successes = 0;

#ifndef PRODUCT
uint PhaseChaitin::_high_pressure = 0;
uint PhaseChaitin::_low_pressure = 0;

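// (Illustration: the "adjusted spill cost" below weights each inserted spill
// load at 4, each store at 2, each register copy at 1 and each mem-mem move
// at 12; e.g. 10 loads, 5 stores, 20 copies and no mem-mem moves give
// 4*10 + 2*5 + 1*20 + 12*0 = 70.)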
void PhaseChaitin::print_chaitin_statistics() {
  tty->print_cr("Inserted %d spill loads, %d spill stores, %d mem-mem moves and %d copies.", _final_loads, _final_stores, _final_memoves, _final_copies);
  tty->print_cr("Total load cost= %6.0f, store cost = %6.0f, mem-mem cost = %5.2f, copy cost = %5.0f.", _final_load_cost, _final_store_cost, _final_memove_cost, _final_copy_cost);
  tty->print_cr("Adjusted spill cost = %7.0f.",
                _final_load_cost*4.0 + _final_store_cost * 2.0 +
                _final_copy_cost*1.0 + _final_memove_cost*12.0);
  tty->print("Conservatively coalesced %d copies, %d pairs",
             _conserv_coalesce, _conserv_coalesce_pair);
  if( _conserv_coalesce_trie || _conserv_coalesce_quad )
    tty->print(", %d tries, %d quads", _conserv_coalesce_trie, _conserv_coalesce_quad);
  tty->print_cr(", %d post alloc.", _post_alloc);
  if( _lost_opp_pp_coalesce || _lost_opp_cflow_coalesce )
    tty->print_cr("Lost coalesce opportunity, %d private-private, and %d cflow interfered.",
                  _lost_opp_pp_coalesce, _lost_opp_cflow_coalesce );
  if( _used_cisc_instructions || _unused_cisc_instructions )
    tty->print_cr("Used cisc instruction %d, remained in register %d",
                  _used_cisc_instructions, _unused_cisc_instructions);
  if( _allocator_successes != 0 )
    tty->print_cr("Average allocation trips %f", (float)_allocator_attempts/(float)_allocator_successes);
  tty->print_cr("High Pressure Blocks = %d, Low Pressure Blocks = %d", _high_pressure, _low_pressure);
}
#endif // not PRODUCT